diff --git a/.github/goreleaser-for-darwin.yaml b/.github/goreleaser-for-darwin.yaml new file mode 100644 index 000000000..c5a121fc2 --- /dev/null +++ b/.github/goreleaser-for-darwin.yaml @@ -0,0 +1,67 @@ +# Copyright (c) 2022-present Bytebase (Hong Kong) Limited. +# +# Portions of this software are licensed as follows: +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +before: + hooks: + - go mod download +builds: + - id: sour + main: ./cmd/server/. + binary: sour + ldflags: + - -w -s + - -X github.com/cfoust/sour/pkg/version.Version={{.Version}} + - -X github.com/cfoust/sour/pkg/version.GoVersion=${GO_VERSION} + - -X github.com/cfoust/sour/pkg/version.GitCommit={{.Commit}} + - -X github.com/cfoust/sour/pkg/version.BuildTime={{.Date}} + tags: + - release + - embed_frontend + env: + - VERSION="development" + - GO_VERSION="1.23" + - CGO_ENABLED=1 + - GIT_COMMIT="unknown" + - BUILD_TIME="unknown" + - BUILD_USER="unknown" + goos: + - darwin + goarch: + #- amd64 + - arm64 +snapshot: + name_template: "{{ incpatch .Version }}-next" +archives: + - builds: + - sour + files: + - LICENSE* + - README* + - src: "assets/dist/*" + dst: "assets" +brews: + - skip_upload: true + homepage: "https://github.com/cfoust/sour" + description: "A Sauerbraten server for the modern era." + license: "MIT" + repository: + owner: cfoust + name: homebrew-taps diff --git a/.github/goreleaser-for-linux.yaml b/.github/goreleaser-for-linux.yaml new file mode 100644 index 000000000..74a01940c --- /dev/null +++ b/.github/goreleaser-for-linux.yaml @@ -0,0 +1,68 @@ +# Copyright (c) 2022-present Bytebase (Hong Kong) Limited. +# +# Portions of this software are licensed as follows: +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +before: + hooks: + - go mod download +builds: + - id: sour + main: ./cmd/server/. + binary: sour + ldflags: + - -w -s + - -X github.com/cfoust/sour/pkg/version.Version={{.Version}} + - -X github.com/cfoust/sour/pkg/version.GoVersion=${GO_VERSION} + - -X github.com/cfoust/sour/pkg/version.GitCommit={{.Commit}} + - -X github.com/cfoust/sour/pkg/version.BuildTime={{.Date}} + tags: + - release + - embed_frontend + env: + - VERSION="development" + - GO_VERSION="1.23" + - CGO_ENABLED=1 + - GIT_COMMIT="unknown" + - BUILD_TIME="unknown" + - BUILD_USER="unknown" + goos: + - linux + goarch: + - amd64 + overrides: + - goos: linux + goarch: amd64 + env: + - CC=gcc +snapshot: + name_template: "{{ incpatch .Version }}-next" +archives: + - builds: + - sour + files: + - LICENSE* + - README* + - src: "assets/dist/*" + dst: "assets" +brews: + - skip_upload: true + repository: + owner: cfoust + name: homebrew-taps diff --git a/.github/goreleaser-release.yaml b/.github/goreleaser-release.yaml new file mode 100644 index 000000000..39d5e90ca --- /dev/null +++ b/.github/goreleaser-release.yaml @@ -0,0 +1,42 @@ +# Copyright (c) 2022-present Bytebase (Hong Kong) Limited. +# +# Portions of this software are licensed as follows: +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +builds: + - skip: true +changelog: + sort: asc + filters: + exclude: + - "^docs:" + - "^test:" +release: + draft: true + mode: append + extra_files: + - glob: ./sour-build/**/* + - glob: ./sour-build/checksums.txt + name_template: "Release {{.Tag}}" + header: | + # What's Changed + + ## Features + + ## Code Diff diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c0a6bf2da..f25d0a1b1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,58 +1,338 @@ -# Taken from the Earthly documentation -# https://docs.earthly.dev/ci-integration/vendor-specific-guides/gh-actions-integration -name: Publish slim image +# Copyright (c) 2022-present Bytebase (Hong Kong) Limited. 
+# +# Portions of this software are licensed as follows: +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +name: Build release artifacts and draft release on: push: - branches: [ main ] + branches: [main] pull_request: - branches: [ main ] + branches: [main] jobs: - build: + build-game: runs-on: ubuntu-latest - env: - FORCE_COLOR: 1 steps: - - uses: actions/checkout@v3 - with: - lfs: true - - - name: Checkout LFS objects - run: git lfs checkout - - - name: Login to GitHub Container Registry - uses: docker/login-action@v1 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Put back the git branch into git (Earthly uses it for tagging) - run: | - branch="" - if [ -n "$GITHUB_HEAD_REF" ]; then - branch="$GITHUB_HEAD_REF" - else - branch="${GITHUB_REF##*/}" - fi - git checkout -b "$branch" || true - - - name: Download latest earthly - run: "sudo /bin/sh -c 'wget https://github.com/earthly/earthly/releases/download/v0.6.10/earthly-linux-amd64 -O /usr/local/bin/earthly && chmod +x /usr/local/bin/earthly'" - - - name: Earthly version - run: earthly --version - - - name: Run build - run: | - branch="" - if [ -n "$GITHUB_HEAD_REF" ]; then - branch="$GITHUB_HEAD_REF" - else - branch="${GITHUB_REF##*/}" - fi - earthly --build-arg tag="$GITHUB_SHA" --ci --push +github - if [ "$branch" = "main" ]; then - earthly --build-arg tag="latest" --ci --push +github - fi + - uses: actions/checkout@v4 + + - name: Install Emscripten + run: | + sudo apt-get install -y build-essential cmake zlib1g-dev + git clone https://github.com/emscripten-core/emsdk.git + cd emsdk + ./emsdk install 3.1.8 + ./emsdk activate 3.1.8 + + - name: Build Sauerbraten + run: | + source emsdk/emsdk_env.sh + cd game + ./build + + - name: Upload + uses: actions/upload-artifact@v4 + with: + name: game + path: game/dist/game/* + + build-client: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + lfs: true + + - uses: actions/setup-node@v4 + with: + node-version: "14.17.5" + + - name: Build web client + run: | + cd client + yarn install + yarn build + cp src/index.html src/favicon.ico src/background.png dist/ + + - name: Upload + uses: actions/upload-artifact@v4 + with: + name: client + path: client/dist/* + + build-assets: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + lfs: true + + - name: Install Emscripten + run: | + sudo apt-get install -y \ + build-essential \ + cmake \ + imagemagick \ + inotify-tools \ + ucommon-utils \ + 
unrar \ + zlib1g-dev + git clone https://github.com/emscripten-core/emsdk.git + cd emsdk + ./emsdk install 3.1.8 + ./emsdk activate 3.1.8 + + - name: Build sourdump + run: | + cd assets + mkdir dist + ./setup + + - name: Build desktop assets + run: | + source emsdk/emsdk_env.sh + cd assets + python3 base.py \ + --root https://static.sourga.me/blobs/6481/.index.source \ + --models \ + --download \ + --outdir dist \ + complex dust2 turbine + + - name: Upload + uses: actions/upload-artifact@v4 + with: + name: assets + path: assets/dist/* + include-hidden-files: true + + build-linux-binary: + needs: [build-assets, build-client, build-game] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Download web client + uses: actions/download-artifact@v4 + with: + name: client + path: ./cmd/server/static/site + + - name: Download game + uses: actions/download-artifact@v4 + with: + name: game + path: ./cmd/server/static/site/game + + - name: Download assets + uses: actions/download-artifact@v4 + with: + name: assets + path: ./assets/dist + + - uses: actions/setup-go@v4 + with: + go-version: 1.23 + + - name: Build enet + run: | + cd pkg/enet/enet + make + + - name: Build + uses: goreleaser/goreleaser-action@v5 + with: + args: release --skip=publish --snapshot --config .github/goreleaser-for-linux.yaml + version: v1.23.0 + + - name: Copy homebrew + run: | + cp dist/homebrew/sour.rb dist/sour.rb + rm dist/*/sour + + - name: Upload + uses: actions/upload-artifact@v4 + with: + name: sour-linux + path: dist/sour* + + build-darwin-binary: + needs: [build-assets, build-client, build-game] + runs-on: macos-latest + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-go@v4 + with: + go-version: 1.23 + + - name: Download web client + uses: actions/download-artifact@v4 + with: + name: client + path: ./cmd/server/static/site + + - name: Download game + uses: actions/download-artifact@v4 + with: + name: game + path: ./cmd/server/static/site/game + + - name: Download assets + uses: actions/download-artifact@v4 + with: + name: assets + path: ./assets/dist + + - name: Build enet + run: | + cd pkg/enet/enet + make + + - name: Set up Homebrew + uses: Homebrew/actions/setup-homebrew@master + + - name: Install swig + run: | + brew install automake + cd /tmp + git clone https://github.com/swig/swig.git + cd swig + git checkout v4.1.1 + ./autogen.sh + ./configure + make + sudo make install + + - name: Build + uses: goreleaser/goreleaser-action@v5 + with: + args: release --skip=publish --snapshot --config .github/goreleaser-for-darwin.yaml + version: v1.23.0 + + - name: Copy homebrew + run: | + cp dist/homebrew/sour.rb dist/sour.rb + rm dist/*/sour + + - name: Upload + uses: actions/upload-artifact@v4 + with: + name: sour-darwin + path: dist/sour* + + draft-release: + needs: [build-linux-binary, build-darwin-binary] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Store version in environment + run: | + TAG=$(git describe --tags --always --abbrev=0) + echo "SOUR_VERSION=${TAG:1}" >> $GITHUB_ENV + - uses: actions/setup-go@v4 + with: + go-version: 1.23 + - name: Make directories + run: | + mkdir -p ./sour-build/linux + mkdir -p ./sour-build/darwin + - name: Download linux binaries + uses: actions/download-artifact@v4 + with: + name: sour-linux + path: ./sour-build/linux + - name: Download darwin binaries + uses: actions/download-artifact@v4 + with: + name: sour-darwin + path: ./sour-build/darwin + - name: Create tap PR + run: | + # Remove last line from darwin formula + 
sed '$d' ./sour-build/darwin/sour.rb | grep -v 'depends_on' > sour.rb + # Include all other lines from linux + sed -n '/on_linux/,$p' ./sour-build/linux/sour.rb >> sour.rb + + # Create versioned formula + cp sour.rb "sour@${SOUR_VERSION}.rb" + + # PR flow inspired by: + # https://github.com/orgs/community/discussions/49745#discussioncomment-5275999 + REPOSITORY="cfoust/homebrew-taps" + DIRECTORY="homebrew-taps" + BRANCH="feat/update-to-$SOUR_VERSION" + + # Clone the remote repository and change working directory to the + # folder it was cloned to. + git clone \ + --depth=1 \ + --branch=main \ + https://some-user:${{ secrets.ACCESS_TOKEN }}@github.com/$REPOSITORY \ + $DIRECTORY + + cd $DIRECTORY + + # Setup the committer's identity. + git config user.email "cfoust@sqweebloid.com" + git config user.name "SourBot" + + # Create a new feature branch for the changes. + git checkout -b $BRANCH + + # Update the script files to the latest version. + cp ../sour*.rb . + + # Commit the changes and push the feature branch to origin + git add . + git commit -m "feat: update sour formula to $SOUR_VERSION" + git push origin "$BRANCH" + + # Store the PAT in a file that can be accessed by the + # GitHub CLI. + echo "${{ secrets.ACCESS_TOKEN }}" > token.txt + + # Authorize GitHub CLI for the current repository and + # create a pull-requests containing the updates. + gh auth login --with-token < token.txt + gh pr create \ + --body "" \ + --title "update sour to $SOUR_VERSION" \ + --head "$BRANCH" \ + --base "main" + + cd .. + rm *.rb + rm -r "$DIRECTORY" + - name: Merge checksum file + run: | + cd ./sour-build + cat ./darwin/sour*checksums.txt >> checksums.txt + cat ./linux/sour*checksums.txt >> checksums.txt + rm ./darwin/sour*checksums.txt + rm ./linux/sour*checksums.txt + # where do these come from? 
+ rm -f sour{,.rb} linux/sour{,.rb} darwin/sour{,.rb} + - name: Release + uses: goreleaser/goreleaser-action@v5 + with: + args: release --config .github/goreleaser-release.yaml + version: v1.23.0 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitignore b/.gitignore index aadc2e4f9..1d91e49f1 100644 --- a/.gitignore +++ b/.gitignore @@ -5,4 +5,5 @@ earthly/** **.o.tmp /dump.rdb /state.db -/assets +/sour +/sourdump diff --git a/.gitpod.yml b/.gitpod.yml deleted file mode 100644 index 2a42da71c..000000000 --- a/.gitpod.yml +++ /dev/null @@ -1,60 +0,0 @@ -github: - prebuilds: - addComment: true - -image: - file: gitpod/Dockerfile - -tasks: - - name: Ingress - init: > - git lfs pull && - /usr/bin/gp sync-done lfs - command: ./services/ingress/build - - - name: WebSocket proxy - init: ./services/proxy/build - command: ./watch /workspace/sour/services/proxy ./wsproxy 28785 - - - name: Game server - init: /usr/bin/gp sync-await lfs && ./services/go/build - command: ./watch /workspace/sour/services/go ./cluster - - - name: Config - init: ./config/build - command: CONFIG=1 ./watch /workspace/sour/config cat - - - name: Redis - command: /usr/local/bin/redis-server /workspace/sour/services/redis/redis.conf - - - name: Client - init: > - /usr/bin/gp sync-await lfs && - cd services/client && - yarn install - command: /workspace/sour/services/client/build - - - name: Assets - env: - ASSET_OUTPUT_DIR: /workspace/sour/build/assets - init: > - /usr/bin/gp sync-await lfs && - ./services/assets/build - command: ./watch /workspace/sour/services/assets cat - - - name: Sauerbraten - init: > - /usr/bin/gp sync-await lfs && - ./services/game/build - env: - GAME_OUTPUT_DIR: /workspace/sour/build/game - command: ./watch /workspace/sour/services/game cat - -ports: - - port: 1234 - onOpen: open-preview - visibility: public - - - port: 28785 - onOpen: ignore - visibility: public diff --git a/services/assets/.gitignore b/assets/.gitignore similarity index 100% rename from services/assets/.gitignore rename to assets/.gitignore diff --git a/assets/.ignore b/assets/.ignore new file mode 100644 index 000000000..7e754e04c --- /dev/null +++ b/assets/.ignore @@ -0,0 +1 @@ +/shims diff --git a/services/assets/README.md b/assets/README.md similarity index 100% rename from services/assets/README.md rename to assets/README.md diff --git a/services/assets/archive b/assets/archive similarity index 100% rename from services/assets/archive rename to assets/archive diff --git a/services/assets/base.list b/assets/base.list similarity index 100% rename from services/assets/base.list rename to assets/base.list diff --git a/services/assets/base.py b/assets/base.py similarity index 100% rename from services/assets/base.py rename to assets/base.py diff --git a/services/assets/build b/assets/build similarity index 100% rename from services/assets/build rename to assets/build diff --git a/services/assets/build-base b/assets/build-base similarity index 100% rename from services/assets/build-base rename to assets/build-base diff --git a/services/assets/cbor2.pyi b/assets/cbor2.pyi similarity index 100% rename from services/assets/cbor2.pyi rename to assets/cbor2.pyi diff --git a/services/assets/ci b/assets/ci similarity index 100% rename from services/assets/ci rename to assets/ci diff --git a/services/assets/dump-source.py b/assets/dump-source.py similarity index 100% rename from services/assets/dump-source.py rename to assets/dump-source.py diff --git a/services/assets/package.py b/assets/package.py similarity index 100% rename
from services/assets/package.py rename to assets/package.py diff --git a/services/assets/quadropolis.py b/assets/quadropolis.py similarity index 100% rename from services/assets/quadropolis.py rename to assets/quadropolis.py diff --git a/services/assets/requirements.txt b/assets/requirements.txt similarity index 100% rename from services/assets/requirements.txt rename to assets/requirements.txt diff --git a/services/assets/setup b/assets/setup similarity index 88% rename from services/assets/setup rename to assets/setup index 4e084c639..03c841f6d 100755 --- a/services/assets/setup +++ b/assets/setup @@ -26,7 +26,7 @@ fi #fi if ! [ -f "sourdump" ]; then - cd ../go - go build -o sourdump cmd/sourdump/main.go - mv sourdump ../assets + cd .. + go build -o assets/sourdump cmd/sourdump/main.go + cd assets fi diff --git a/services/assets/setup-quad b/assets/setup-quad similarity index 100% rename from services/assets/setup-quad rename to assets/setup-quad diff --git a/services/assets/shims/0096dc89086a719a46673375008c3d0fb0c28924b73ec7d710a3a0c50fd7ae7c b/assets/shims/0096dc89086a719a46673375008c3d0fb0c28924b73ec7d710a3a0c50fd7ae7c similarity index 100% rename from services/assets/shims/0096dc89086a719a46673375008c3d0fb0c28924b73ec7d710a3a0c50fd7ae7c rename to assets/shims/0096dc89086a719a46673375008c3d0fb0c28924b73ec7d710a3a0c50fd7ae7c diff --git a/services/assets/shims/015fccd0f901512ac085d7dae9ed640fa58654e5f97983ab83a4593a63f9fe5f b/assets/shims/015fccd0f901512ac085d7dae9ed640fa58654e5f97983ab83a4593a63f9fe5f similarity index 100% rename from services/assets/shims/015fccd0f901512ac085d7dae9ed640fa58654e5f97983ab83a4593a63f9fe5f rename to assets/shims/015fccd0f901512ac085d7dae9ed640fa58654e5f97983ab83a4593a63f9fe5f diff --git a/services/assets/shims/01fd176f9fb47a0674aaeb3239aada6e6c181f53f5ebedad3a55b8501f154044 b/assets/shims/01fd176f9fb47a0674aaeb3239aada6e6c181f53f5ebedad3a55b8501f154044 similarity index 100% rename from services/assets/shims/01fd176f9fb47a0674aaeb3239aada6e6c181f53f5ebedad3a55b8501f154044 rename to assets/shims/01fd176f9fb47a0674aaeb3239aada6e6c181f53f5ebedad3a55b8501f154044 diff --git a/services/assets/shims/070c7ab17d183c8142e807312f74976f7462000bd36a28441647f12cd4284cdc b/assets/shims/070c7ab17d183c8142e807312f74976f7462000bd36a28441647f12cd4284cdc similarity index 100% rename from services/assets/shims/070c7ab17d183c8142e807312f74976f7462000bd36a28441647f12cd4284cdc rename to assets/shims/070c7ab17d183c8142e807312f74976f7462000bd36a28441647f12cd4284cdc diff --git a/services/assets/shims/0bc7310b445c329ab72488376a5036bf81fa9656227b4a81bb24be8bd4c14311 b/assets/shims/0bc7310b445c329ab72488376a5036bf81fa9656227b4a81bb24be8bd4c14311 similarity index 100% rename from services/assets/shims/0bc7310b445c329ab72488376a5036bf81fa9656227b4a81bb24be8bd4c14311 rename to assets/shims/0bc7310b445c329ab72488376a5036bf81fa9656227b4a81bb24be8bd4c14311 diff --git a/services/assets/shims/0c4569d63fb6f794c838e2186a45a72b249b9dd04a2b9b371cf3b5ab2ec695e5 b/assets/shims/0c4569d63fb6f794c838e2186a45a72b249b9dd04a2b9b371cf3b5ab2ec695e5 similarity index 100% rename from services/assets/shims/0c4569d63fb6f794c838e2186a45a72b249b9dd04a2b9b371cf3b5ab2ec695e5 rename to assets/shims/0c4569d63fb6f794c838e2186a45a72b249b9dd04a2b9b371cf3b5ab2ec695e5 diff --git a/services/assets/shims/0c54fb80e7846c702af9b1aab7d740d366b0ea3521a5a42a104536f59304e484 b/assets/shims/0c54fb80e7846c702af9b1aab7d740d366b0ea3521a5a42a104536f59304e484 similarity index 100% rename from 
services/assets/shims/0c54fb80e7846c702af9b1aab7d740d366b0ea3521a5a42a104536f59304e484 rename to assets/shims/0c54fb80e7846c702af9b1aab7d740d366b0ea3521a5a42a104536f59304e484 diff --git a/services/assets/shims/0c86cf6e9c8576d4e1801b926155c32ba002be743976f41edd5afe95efe0cf13 b/assets/shims/0c86cf6e9c8576d4e1801b926155c32ba002be743976f41edd5afe95efe0cf13 similarity index 100% rename from services/assets/shims/0c86cf6e9c8576d4e1801b926155c32ba002be743976f41edd5afe95efe0cf13 rename to assets/shims/0c86cf6e9c8576d4e1801b926155c32ba002be743976f41edd5afe95efe0cf13 diff --git a/services/assets/shims/0d4021ee258308176f434eca06c482a50eae89b93b64a13aaac7b59c4a1be3c4 b/assets/shims/0d4021ee258308176f434eca06c482a50eae89b93b64a13aaac7b59c4a1be3c4 similarity index 100% rename from services/assets/shims/0d4021ee258308176f434eca06c482a50eae89b93b64a13aaac7b59c4a1be3c4 rename to assets/shims/0d4021ee258308176f434eca06c482a50eae89b93b64a13aaac7b59c4a1be3c4 diff --git a/services/assets/shims/0de22217476089e1ee5c375fe62fd05ca9ea18a2804166e693cf60fc8e550d71 b/assets/shims/0de22217476089e1ee5c375fe62fd05ca9ea18a2804166e693cf60fc8e550d71 similarity index 100% rename from services/assets/shims/0de22217476089e1ee5c375fe62fd05ca9ea18a2804166e693cf60fc8e550d71 rename to assets/shims/0de22217476089e1ee5c375fe62fd05ca9ea18a2804166e693cf60fc8e550d71 diff --git a/services/assets/shims/0e10166cefd4240d957859e79f83f837800e99a48ac27734c6ba72df74ff0961 b/assets/shims/0e10166cefd4240d957859e79f83f837800e99a48ac27734c6ba72df74ff0961 similarity index 100% rename from services/assets/shims/0e10166cefd4240d957859e79f83f837800e99a48ac27734c6ba72df74ff0961 rename to assets/shims/0e10166cefd4240d957859e79f83f837800e99a48ac27734c6ba72df74ff0961 diff --git a/services/assets/shims/0e9e769e2e046a7369ccd0c7b37d5d551255f66d39e2ca8d46b61e461e860b80 b/assets/shims/0e9e769e2e046a7369ccd0c7b37d5d551255f66d39e2ca8d46b61e461e860b80 similarity index 100% rename from services/assets/shims/0e9e769e2e046a7369ccd0c7b37d5d551255f66d39e2ca8d46b61e461e860b80 rename to assets/shims/0e9e769e2e046a7369ccd0c7b37d5d551255f66d39e2ca8d46b61e461e860b80 diff --git a/services/assets/shims/0ee68647a2b67d15e88dac285aa97418409d3f76c0b1164782cba2637ef75756 b/assets/shims/0ee68647a2b67d15e88dac285aa97418409d3f76c0b1164782cba2637ef75756 similarity index 100% rename from services/assets/shims/0ee68647a2b67d15e88dac285aa97418409d3f76c0b1164782cba2637ef75756 rename to assets/shims/0ee68647a2b67d15e88dac285aa97418409d3f76c0b1164782cba2637ef75756 diff --git a/services/assets/shims/0f8891010c077cff0a7efeea17a7900d99045c0ef96dc9c0e611affb1388d2ba b/assets/shims/0f8891010c077cff0a7efeea17a7900d99045c0ef96dc9c0e611affb1388d2ba similarity index 100% rename from services/assets/shims/0f8891010c077cff0a7efeea17a7900d99045c0ef96dc9c0e611affb1388d2ba rename to assets/shims/0f8891010c077cff0a7efeea17a7900d99045c0ef96dc9c0e611affb1388d2ba diff --git a/services/assets/shims/0f898e2dc43604d84cbd9b256c5da5124844859d76c331945b51b29dc43f1401 b/assets/shims/0f898e2dc43604d84cbd9b256c5da5124844859d76c331945b51b29dc43f1401 similarity index 100% rename from services/assets/shims/0f898e2dc43604d84cbd9b256c5da5124844859d76c331945b51b29dc43f1401 rename to assets/shims/0f898e2dc43604d84cbd9b256c5da5124844859d76c331945b51b29dc43f1401 diff --git a/services/assets/shims/11032c248520000eb913d28f8d436f257460d9b1ce0550d2f50efa62b2c9cf63 b/assets/shims/11032c248520000eb913d28f8d436f257460d9b1ce0550d2f50efa62b2c9cf63 similarity index 100% rename from 
services/assets/shims/11032c248520000eb913d28f8d436f257460d9b1ce0550d2f50efa62b2c9cf63 rename to assets/shims/11032c248520000eb913d28f8d436f257460d9b1ce0550d2f50efa62b2c9cf63 diff --git a/services/assets/shims/11498a75b2c35aea1ac33cacaa40cc3aff89a322e5cb1eb779cb75165e087fe7 b/assets/shims/11498a75b2c35aea1ac33cacaa40cc3aff89a322e5cb1eb779cb75165e087fe7 similarity index 100% rename from services/assets/shims/11498a75b2c35aea1ac33cacaa40cc3aff89a322e5cb1eb779cb75165e087fe7 rename to assets/shims/11498a75b2c35aea1ac33cacaa40cc3aff89a322e5cb1eb779cb75165e087fe7 diff --git a/services/assets/shims/1410b9704b3527c41e96f4505de11f4b2525e38780418d14ad62bac3057b05b3 b/assets/shims/1410b9704b3527c41e96f4505de11f4b2525e38780418d14ad62bac3057b05b3 similarity index 100% rename from services/assets/shims/1410b9704b3527c41e96f4505de11f4b2525e38780418d14ad62bac3057b05b3 rename to assets/shims/1410b9704b3527c41e96f4505de11f4b2525e38780418d14ad62bac3057b05b3 diff --git a/services/assets/shims/172dad215b26ba42dd8ff7b412e5cc5d107d7097ddb4fd3ac60a646dcfaf14fc b/assets/shims/172dad215b26ba42dd8ff7b412e5cc5d107d7097ddb4fd3ac60a646dcfaf14fc similarity index 100% rename from services/assets/shims/172dad215b26ba42dd8ff7b412e5cc5d107d7097ddb4fd3ac60a646dcfaf14fc rename to assets/shims/172dad215b26ba42dd8ff7b412e5cc5d107d7097ddb4fd3ac60a646dcfaf14fc diff --git a/services/assets/shims/1a487d500b50df580f4583d6c5794b614cf6d69d53cfd842c06f10caaf68675a b/assets/shims/1a487d500b50df580f4583d6c5794b614cf6d69d53cfd842c06f10caaf68675a similarity index 100% rename from services/assets/shims/1a487d500b50df580f4583d6c5794b614cf6d69d53cfd842c06f10caaf68675a rename to assets/shims/1a487d500b50df580f4583d6c5794b614cf6d69d53cfd842c06f10caaf68675a diff --git a/services/assets/shims/1a5a7410827f2c558cae37ee0a9e30b5705a6714341f6203333557d00aeff97f b/assets/shims/1a5a7410827f2c558cae37ee0a9e30b5705a6714341f6203333557d00aeff97f similarity index 100% rename from services/assets/shims/1a5a7410827f2c558cae37ee0a9e30b5705a6714341f6203333557d00aeff97f rename to assets/shims/1a5a7410827f2c558cae37ee0a9e30b5705a6714341f6203333557d00aeff97f diff --git a/services/assets/shims/1d83cec97b9942af6d6c6b68c74c4b845bb57b7a4ac56f5f807451511e07a239 b/assets/shims/1d83cec97b9942af6d6c6b68c74c4b845bb57b7a4ac56f5f807451511e07a239 similarity index 100% rename from services/assets/shims/1d83cec97b9942af6d6c6b68c74c4b845bb57b7a4ac56f5f807451511e07a239 rename to assets/shims/1d83cec97b9942af6d6c6b68c74c4b845bb57b7a4ac56f5f807451511e07a239 diff --git a/services/assets/shims/20f72a837babe4a9908757271249f6f021afb582d30b61aa562128648f621254 b/assets/shims/20f72a837babe4a9908757271249f6f021afb582d30b61aa562128648f621254 similarity index 100% rename from services/assets/shims/20f72a837babe4a9908757271249f6f021afb582d30b61aa562128648f621254 rename to assets/shims/20f72a837babe4a9908757271249f6f021afb582d30b61aa562128648f621254 diff --git a/services/assets/shims/2306c587bd0e037c386327ae632836a532cb9b8d9df965e430e8e8b818f75806 b/assets/shims/2306c587bd0e037c386327ae632836a532cb9b8d9df965e430e8e8b818f75806 similarity index 100% rename from services/assets/shims/2306c587bd0e037c386327ae632836a532cb9b8d9df965e430e8e8b818f75806 rename to assets/shims/2306c587bd0e037c386327ae632836a532cb9b8d9df965e430e8e8b818f75806 diff --git a/services/assets/shims/238b9d2a8b155854c345e182ed5bb93fde663f38a41991747c4b6976bb1977de b/assets/shims/238b9d2a8b155854c345e182ed5bb93fde663f38a41991747c4b6976bb1977de similarity index 100% rename from 
services/assets/shims/238b9d2a8b155854c345e182ed5bb93fde663f38a41991747c4b6976bb1977de rename to assets/shims/238b9d2a8b155854c345e182ed5bb93fde663f38a41991747c4b6976bb1977de diff --git a/services/assets/shims/23bb569608dbf5d5ef3b8c791ae1f6987f0cbfc754cc17e7443cda4459cdc8ee b/assets/shims/23bb569608dbf5d5ef3b8c791ae1f6987f0cbfc754cc17e7443cda4459cdc8ee similarity index 100% rename from services/assets/shims/23bb569608dbf5d5ef3b8c791ae1f6987f0cbfc754cc17e7443cda4459cdc8ee rename to assets/shims/23bb569608dbf5d5ef3b8c791ae1f6987f0cbfc754cc17e7443cda4459cdc8ee diff --git a/services/assets/shims/2674dabf11f22067ca7ac632bd3a23daf26945a45f20c043cdda00c06db944b2 b/assets/shims/2674dabf11f22067ca7ac632bd3a23daf26945a45f20c043cdda00c06db944b2 similarity index 100% rename from services/assets/shims/2674dabf11f22067ca7ac632bd3a23daf26945a45f20c043cdda00c06db944b2 rename to assets/shims/2674dabf11f22067ca7ac632bd3a23daf26945a45f20c043cdda00c06db944b2 diff --git a/services/assets/shims/26d24b2c55df45b4228f8b92a0a37fe77f31e20931bf48b0cb43899d161e6619 b/assets/shims/26d24b2c55df45b4228f8b92a0a37fe77f31e20931bf48b0cb43899d161e6619 similarity index 100% rename from services/assets/shims/26d24b2c55df45b4228f8b92a0a37fe77f31e20931bf48b0cb43899d161e6619 rename to assets/shims/26d24b2c55df45b4228f8b92a0a37fe77f31e20931bf48b0cb43899d161e6619 diff --git a/services/assets/shims/2881fa2b95c13ff636e3e4344991f9bdcbcb1731afacbd56e3f1a0e626daef88 b/assets/shims/2881fa2b95c13ff636e3e4344991f9bdcbcb1731afacbd56e3f1a0e626daef88 similarity index 100% rename from services/assets/shims/2881fa2b95c13ff636e3e4344991f9bdcbcb1731afacbd56e3f1a0e626daef88 rename to assets/shims/2881fa2b95c13ff636e3e4344991f9bdcbcb1731afacbd56e3f1a0e626daef88 diff --git a/services/assets/shims/2baacd05ea5d29855b6cd8d7983f526b5b7bc430fa1338fc95aa6b81188a86ea b/assets/shims/2baacd05ea5d29855b6cd8d7983f526b5b7bc430fa1338fc95aa6b81188a86ea similarity index 100% rename from services/assets/shims/2baacd05ea5d29855b6cd8d7983f526b5b7bc430fa1338fc95aa6b81188a86ea rename to assets/shims/2baacd05ea5d29855b6cd8d7983f526b5b7bc430fa1338fc95aa6b81188a86ea diff --git a/services/assets/shims/2d4178d6ea809e4ec0e60b62ec20d2f8daa8e447945c8234fda0f53899ec8608 b/assets/shims/2d4178d6ea809e4ec0e60b62ec20d2f8daa8e447945c8234fda0f53899ec8608 similarity index 100% rename from services/assets/shims/2d4178d6ea809e4ec0e60b62ec20d2f8daa8e447945c8234fda0f53899ec8608 rename to assets/shims/2d4178d6ea809e4ec0e60b62ec20d2f8daa8e447945c8234fda0f53899ec8608 diff --git a/services/assets/shims/2f01d00cbe00419ed2abd6adce9cd503c5e49d139b32e4d85001d98e5c5f3b2f b/assets/shims/2f01d00cbe00419ed2abd6adce9cd503c5e49d139b32e4d85001d98e5c5f3b2f similarity index 100% rename from services/assets/shims/2f01d00cbe00419ed2abd6adce9cd503c5e49d139b32e4d85001d98e5c5f3b2f rename to assets/shims/2f01d00cbe00419ed2abd6adce9cd503c5e49d139b32e4d85001d98e5c5f3b2f diff --git a/services/assets/shims/30055b16bcb5ac33019a5c54ead52234b7031cdf656eb95049afef2deec74379 b/assets/shims/30055b16bcb5ac33019a5c54ead52234b7031cdf656eb95049afef2deec74379 similarity index 100% rename from services/assets/shims/30055b16bcb5ac33019a5c54ead52234b7031cdf656eb95049afef2deec74379 rename to assets/shims/30055b16bcb5ac33019a5c54ead52234b7031cdf656eb95049afef2deec74379 diff --git a/services/assets/shims/31a904e8bb66687602766299b0c77462f9eedc0673fe651f13356675ea72e0b4 b/assets/shims/31a904e8bb66687602766299b0c77462f9eedc0673fe651f13356675ea72e0b4 similarity index 100% rename from 
services/assets/shims/31a904e8bb66687602766299b0c77462f9eedc0673fe651f13356675ea72e0b4 rename to assets/shims/31a904e8bb66687602766299b0c77462f9eedc0673fe651f13356675ea72e0b4 diff --git a/services/assets/shims/364aa634efb78ef2c33434b108e0fe83b1d23809a61bf231f2db39421bf664d3 b/assets/shims/364aa634efb78ef2c33434b108e0fe83b1d23809a61bf231f2db39421bf664d3 similarity index 100% rename from services/assets/shims/364aa634efb78ef2c33434b108e0fe83b1d23809a61bf231f2db39421bf664d3 rename to assets/shims/364aa634efb78ef2c33434b108e0fe83b1d23809a61bf231f2db39421bf664d3 diff --git a/services/assets/shims/38d421857d85ad26359e6ec5e0acc706638f6bb801ea2c17e0e1a5b3e0de6d8f b/assets/shims/38d421857d85ad26359e6ec5e0acc706638f6bb801ea2c17e0e1a5b3e0de6d8f similarity index 100% rename from services/assets/shims/38d421857d85ad26359e6ec5e0acc706638f6bb801ea2c17e0e1a5b3e0de6d8f rename to assets/shims/38d421857d85ad26359e6ec5e0acc706638f6bb801ea2c17e0e1a5b3e0de6d8f diff --git a/services/assets/shims/3a8a78a881ac67f8a50fbb8926e18cc91b5e0d39aa56885c2a4f746d25bbe19a b/assets/shims/3a8a78a881ac67f8a50fbb8926e18cc91b5e0d39aa56885c2a4f746d25bbe19a similarity index 100% rename from services/assets/shims/3a8a78a881ac67f8a50fbb8926e18cc91b5e0d39aa56885c2a4f746d25bbe19a rename to assets/shims/3a8a78a881ac67f8a50fbb8926e18cc91b5e0d39aa56885c2a4f746d25bbe19a diff --git a/services/assets/shims/3a9f828d5acef78c560530048e3898967174cd15c7d36f7110b599ecdd89fb47 b/assets/shims/3a9f828d5acef78c560530048e3898967174cd15c7d36f7110b599ecdd89fb47 similarity index 100% rename from services/assets/shims/3a9f828d5acef78c560530048e3898967174cd15c7d36f7110b599ecdd89fb47 rename to assets/shims/3a9f828d5acef78c560530048e3898967174cd15c7d36f7110b599ecdd89fb47 diff --git a/services/assets/shims/3a9fb8f4d61bee1dffc496e2e091732d4f93c47b4df92df53c1821aca1f470f6 b/assets/shims/3a9fb8f4d61bee1dffc496e2e091732d4f93c47b4df92df53c1821aca1f470f6 similarity index 100% rename from services/assets/shims/3a9fb8f4d61bee1dffc496e2e091732d4f93c47b4df92df53c1821aca1f470f6 rename to assets/shims/3a9fb8f4d61bee1dffc496e2e091732d4f93c47b4df92df53c1821aca1f470f6 diff --git a/services/assets/shims/3ec37b35f3542a430e674de28323011e5b53185e24510594ef18b0ff19685e93 b/assets/shims/3ec37b35f3542a430e674de28323011e5b53185e24510594ef18b0ff19685e93 similarity index 100% rename from services/assets/shims/3ec37b35f3542a430e674de28323011e5b53185e24510594ef18b0ff19685e93 rename to assets/shims/3ec37b35f3542a430e674de28323011e5b53185e24510594ef18b0ff19685e93 diff --git a/services/assets/shims/402df4155d0c95809d239006a9e0bb7b01728dac25d51dd94abe774193700690 b/assets/shims/402df4155d0c95809d239006a9e0bb7b01728dac25d51dd94abe774193700690 similarity index 100% rename from services/assets/shims/402df4155d0c95809d239006a9e0bb7b01728dac25d51dd94abe774193700690 rename to assets/shims/402df4155d0c95809d239006a9e0bb7b01728dac25d51dd94abe774193700690 diff --git a/services/assets/shims/4084cdcd9bc00b56bcf66962d6f05de5cef7816bca82eb98224c8f27fce09121 b/assets/shims/4084cdcd9bc00b56bcf66962d6f05de5cef7816bca82eb98224c8f27fce09121 similarity index 100% rename from services/assets/shims/4084cdcd9bc00b56bcf66962d6f05de5cef7816bca82eb98224c8f27fce09121 rename to assets/shims/4084cdcd9bc00b56bcf66962d6f05de5cef7816bca82eb98224c8f27fce09121 diff --git a/services/assets/shims/40989c335af1b23ffcae52f453fb96d5a343d2ec40f6fd93682b993b1c9fcf62 b/assets/shims/40989c335af1b23ffcae52f453fb96d5a343d2ec40f6fd93682b993b1c9fcf62 similarity index 100% rename from 
services/assets/shims/40989c335af1b23ffcae52f453fb96d5a343d2ec40f6fd93682b993b1c9fcf62 rename to assets/shims/40989c335af1b23ffcae52f453fb96d5a343d2ec40f6fd93682b993b1c9fcf62 diff --git a/services/assets/shims/419791318064460de7d8c5da5c75f35848c023f4e354d5e579df2c366e2340fa b/assets/shims/419791318064460de7d8c5da5c75f35848c023f4e354d5e579df2c366e2340fa similarity index 100% rename from services/assets/shims/419791318064460de7d8c5da5c75f35848c023f4e354d5e579df2c366e2340fa rename to assets/shims/419791318064460de7d8c5da5c75f35848c023f4e354d5e579df2c366e2340fa diff --git a/services/assets/shims/44fe565b76527beb49ee0d87448343e79c835ae6814a03d320366492c7b0b956 b/assets/shims/44fe565b76527beb49ee0d87448343e79c835ae6814a03d320366492c7b0b956 similarity index 100% rename from services/assets/shims/44fe565b76527beb49ee0d87448343e79c835ae6814a03d320366492c7b0b956 rename to assets/shims/44fe565b76527beb49ee0d87448343e79c835ae6814a03d320366492c7b0b956 diff --git a/services/assets/shims/4634339e9ab2678838c136b3f0c4fc6234dfb8776481d30132bea5cbef007146 b/assets/shims/4634339e9ab2678838c136b3f0c4fc6234dfb8776481d30132bea5cbef007146 similarity index 100% rename from services/assets/shims/4634339e9ab2678838c136b3f0c4fc6234dfb8776481d30132bea5cbef007146 rename to assets/shims/4634339e9ab2678838c136b3f0c4fc6234dfb8776481d30132bea5cbef007146 diff --git a/services/assets/shims/46bda65124b64e0af1ffa20d11f1c30a9a0148c61f79bfc760174136468654b2 b/assets/shims/46bda65124b64e0af1ffa20d11f1c30a9a0148c61f79bfc760174136468654b2 similarity index 100% rename from services/assets/shims/46bda65124b64e0af1ffa20d11f1c30a9a0148c61f79bfc760174136468654b2 rename to assets/shims/46bda65124b64e0af1ffa20d11f1c30a9a0148c61f79bfc760174136468654b2 diff --git a/services/assets/shims/46e4506971b6348ce09c9cd9af62f2f78f7ad18f4e79288ddbd378bfe00d4f70 b/assets/shims/46e4506971b6348ce09c9cd9af62f2f78f7ad18f4e79288ddbd378bfe00d4f70 similarity index 100% rename from services/assets/shims/46e4506971b6348ce09c9cd9af62f2f78f7ad18f4e79288ddbd378bfe00d4f70 rename to assets/shims/46e4506971b6348ce09c9cd9af62f2f78f7ad18f4e79288ddbd378bfe00d4f70 diff --git a/services/assets/shims/47baccb1917e893b5519d6e967b47c841abddcafce38d0e92c0e70b301f09209 b/assets/shims/47baccb1917e893b5519d6e967b47c841abddcafce38d0e92c0e70b301f09209 similarity index 100% rename from services/assets/shims/47baccb1917e893b5519d6e967b47c841abddcafce38d0e92c0e70b301f09209 rename to assets/shims/47baccb1917e893b5519d6e967b47c841abddcafce38d0e92c0e70b301f09209 diff --git a/services/assets/shims/4e7608ac389d05f02897d0358f3664d2cb5c5348d827ce240bd8265639061061 b/assets/shims/4e7608ac389d05f02897d0358f3664d2cb5c5348d827ce240bd8265639061061 similarity index 100% rename from services/assets/shims/4e7608ac389d05f02897d0358f3664d2cb5c5348d827ce240bd8265639061061 rename to assets/shims/4e7608ac389d05f02897d0358f3664d2cb5c5348d827ce240bd8265639061061 diff --git a/services/assets/shims/4e9e86e7a2c7e9f0ef3aa570ca007c1b71ff95bde339c396c3340bb602ad8dce b/assets/shims/4e9e86e7a2c7e9f0ef3aa570ca007c1b71ff95bde339c396c3340bb602ad8dce similarity index 100% rename from services/assets/shims/4e9e86e7a2c7e9f0ef3aa570ca007c1b71ff95bde339c396c3340bb602ad8dce rename to assets/shims/4e9e86e7a2c7e9f0ef3aa570ca007c1b71ff95bde339c396c3340bb602ad8dce diff --git a/services/assets/shims/4eded720be99fd3b601de0f0401c5c46234d4231c32487feb273f259dfcbc8df b/assets/shims/4eded720be99fd3b601de0f0401c5c46234d4231c32487feb273f259dfcbc8df similarity index 100% rename from 
services/assets/shims/4eded720be99fd3b601de0f0401c5c46234d4231c32487feb273f259dfcbc8df rename to assets/shims/4eded720be99fd3b601de0f0401c5c46234d4231c32487feb273f259dfcbc8df diff --git a/services/assets/shims/518176382c8bfbe7b0d9e3becc5ff0c593463041b47500a10c7a24aafcadf32b b/assets/shims/518176382c8bfbe7b0d9e3becc5ff0c593463041b47500a10c7a24aafcadf32b similarity index 100% rename from services/assets/shims/518176382c8bfbe7b0d9e3becc5ff0c593463041b47500a10c7a24aafcadf32b rename to assets/shims/518176382c8bfbe7b0d9e3becc5ff0c593463041b47500a10c7a24aafcadf32b diff --git a/services/assets/shims/534b1649b5190794976fbc0f79eaf3b393ee486302b4012304a9b8194e579ab7 b/assets/shims/534b1649b5190794976fbc0f79eaf3b393ee486302b4012304a9b8194e579ab7 similarity index 100% rename from services/assets/shims/534b1649b5190794976fbc0f79eaf3b393ee486302b4012304a9b8194e579ab7 rename to assets/shims/534b1649b5190794976fbc0f79eaf3b393ee486302b4012304a9b8194e579ab7 diff --git a/services/assets/shims/536ebf977fb25a0a307c8689d4e81810775aaeb689f39d8c4acd6745adc400d0 b/assets/shims/536ebf977fb25a0a307c8689d4e81810775aaeb689f39d8c4acd6745adc400d0 similarity index 100% rename from services/assets/shims/536ebf977fb25a0a307c8689d4e81810775aaeb689f39d8c4acd6745adc400d0 rename to assets/shims/536ebf977fb25a0a307c8689d4e81810775aaeb689f39d8c4acd6745adc400d0 diff --git a/services/assets/shims/53c31c279e0a36040444ede86282171894fb121061c5ec07fcd93ddf06a7b76a b/assets/shims/53c31c279e0a36040444ede86282171894fb121061c5ec07fcd93ddf06a7b76a similarity index 100% rename from services/assets/shims/53c31c279e0a36040444ede86282171894fb121061c5ec07fcd93ddf06a7b76a rename to assets/shims/53c31c279e0a36040444ede86282171894fb121061c5ec07fcd93ddf06a7b76a diff --git a/services/assets/shims/54472386276871d373863544c9931bfb92c4ad03574b14542d1d091d7c544460 b/assets/shims/54472386276871d373863544c9931bfb92c4ad03574b14542d1d091d7c544460 similarity index 100% rename from services/assets/shims/54472386276871d373863544c9931bfb92c4ad03574b14542d1d091d7c544460 rename to assets/shims/54472386276871d373863544c9931bfb92c4ad03574b14542d1d091d7c544460 diff --git a/services/assets/shims/577188c84bd70718d3b7c1b7b01bdb97354d5ab9465d895c18a1efba9771ff14 b/assets/shims/577188c84bd70718d3b7c1b7b01bdb97354d5ab9465d895c18a1efba9771ff14 similarity index 100% rename from services/assets/shims/577188c84bd70718d3b7c1b7b01bdb97354d5ab9465d895c18a1efba9771ff14 rename to assets/shims/577188c84bd70718d3b7c1b7b01bdb97354d5ab9465d895c18a1efba9771ff14 diff --git a/services/assets/shims/5797b4a20df4131dfe8ccfe11191c6cc5d5be3f094bd285a4bd9ff2c56c9bd8a b/assets/shims/5797b4a20df4131dfe8ccfe11191c6cc5d5be3f094bd285a4bd9ff2c56c9bd8a similarity index 100% rename from services/assets/shims/5797b4a20df4131dfe8ccfe11191c6cc5d5be3f094bd285a4bd9ff2c56c9bd8a rename to assets/shims/5797b4a20df4131dfe8ccfe11191c6cc5d5be3f094bd285a4bd9ff2c56c9bd8a diff --git a/services/assets/shims/5c25220936d9002e75d4594437f7de64ec1edf6a18db0323d397c1bc26720280 b/assets/shims/5c25220936d9002e75d4594437f7de64ec1edf6a18db0323d397c1bc26720280 similarity index 100% rename from services/assets/shims/5c25220936d9002e75d4594437f7de64ec1edf6a18db0323d397c1bc26720280 rename to assets/shims/5c25220936d9002e75d4594437f7de64ec1edf6a18db0323d397c1bc26720280 diff --git a/services/assets/shims/5d1708894e903e38be1d82f4ecc7efe2a3ef1c7c4755c2d714c2b0db92edc6c3 b/assets/shims/5d1708894e903e38be1d82f4ecc7efe2a3ef1c7c4755c2d714c2b0db92edc6c3 similarity index 100% rename from 
services/assets/shims/5d1708894e903e38be1d82f4ecc7efe2a3ef1c7c4755c2d714c2b0db92edc6c3 rename to assets/shims/5d1708894e903e38be1d82f4ecc7efe2a3ef1c7c4755c2d714c2b0db92edc6c3 diff --git a/services/assets/shims/601ececdf21f0c0d9cf07d31da13c510273f92b0332875a5261863390c5ea08c b/assets/shims/601ececdf21f0c0d9cf07d31da13c510273f92b0332875a5261863390c5ea08c similarity index 100% rename from services/assets/shims/601ececdf21f0c0d9cf07d31da13c510273f92b0332875a5261863390c5ea08c rename to assets/shims/601ececdf21f0c0d9cf07d31da13c510273f92b0332875a5261863390c5ea08c diff --git a/services/assets/shims/608f107a6ee3be7cbb70f900ec4c609d43a7248b9d2194d7ccc73cbf733fef13 b/assets/shims/608f107a6ee3be7cbb70f900ec4c609d43a7248b9d2194d7ccc73cbf733fef13 similarity index 100% rename from services/assets/shims/608f107a6ee3be7cbb70f900ec4c609d43a7248b9d2194d7ccc73cbf733fef13 rename to assets/shims/608f107a6ee3be7cbb70f900ec4c609d43a7248b9d2194d7ccc73cbf733fef13 diff --git a/services/assets/shims/60b89e83057e9f4b636aa2976542dc2ffb742a6e135442526a50808908885137 b/assets/shims/60b89e83057e9f4b636aa2976542dc2ffb742a6e135442526a50808908885137 similarity index 100% rename from services/assets/shims/60b89e83057e9f4b636aa2976542dc2ffb742a6e135442526a50808908885137 rename to assets/shims/60b89e83057e9f4b636aa2976542dc2ffb742a6e135442526a50808908885137 diff --git a/services/assets/shims/61e7835d25625dc73936d3342451615b3067510fa4f49004fd083423c3a53c09 b/assets/shims/61e7835d25625dc73936d3342451615b3067510fa4f49004fd083423c3a53c09 similarity index 100% rename from services/assets/shims/61e7835d25625dc73936d3342451615b3067510fa4f49004fd083423c3a53c09 rename to assets/shims/61e7835d25625dc73936d3342451615b3067510fa4f49004fd083423c3a53c09 diff --git a/services/assets/shims/6210a4889190951adef6e22071e8363a701e8fcd4f753b47897880eeb4a863ef b/assets/shims/6210a4889190951adef6e22071e8363a701e8fcd4f753b47897880eeb4a863ef similarity index 100% rename from services/assets/shims/6210a4889190951adef6e22071e8363a701e8fcd4f753b47897880eeb4a863ef rename to assets/shims/6210a4889190951adef6e22071e8363a701e8fcd4f753b47897880eeb4a863ef diff --git a/services/assets/shims/6546d29854a444d88e4940847d2697cd96906ceb7c5dfbe9457154a5e948b8dd b/assets/shims/6546d29854a444d88e4940847d2697cd96906ceb7c5dfbe9457154a5e948b8dd similarity index 100% rename from services/assets/shims/6546d29854a444d88e4940847d2697cd96906ceb7c5dfbe9457154a5e948b8dd rename to assets/shims/6546d29854a444d88e4940847d2697cd96906ceb7c5dfbe9457154a5e948b8dd diff --git a/services/assets/shims/67ac0f2050f62bc61691c25e397d9002f945d6adfab03acc1b299888e22e9d37 b/assets/shims/67ac0f2050f62bc61691c25e397d9002f945d6adfab03acc1b299888e22e9d37 similarity index 100% rename from services/assets/shims/67ac0f2050f62bc61691c25e397d9002f945d6adfab03acc1b299888e22e9d37 rename to assets/shims/67ac0f2050f62bc61691c25e397d9002f945d6adfab03acc1b299888e22e9d37 diff --git a/services/assets/shims/684d6b7c7ccb7a13f41611bc42ae172b531b36924978ee6987c5883335acc99a b/assets/shims/684d6b7c7ccb7a13f41611bc42ae172b531b36924978ee6987c5883335acc99a similarity index 100% rename from services/assets/shims/684d6b7c7ccb7a13f41611bc42ae172b531b36924978ee6987c5883335acc99a rename to assets/shims/684d6b7c7ccb7a13f41611bc42ae172b531b36924978ee6987c5883335acc99a diff --git a/services/assets/shims/6b465bd4c49c1c22e4cde3093d21b019153d2dc4a9a1cdef25f66661a479dd59 b/assets/shims/6b465bd4c49c1c22e4cde3093d21b019153d2dc4a9a1cdef25f66661a479dd59 similarity index 100% rename from 
services/assets/shims/6b465bd4c49c1c22e4cde3093d21b019153d2dc4a9a1cdef25f66661a479dd59 rename to assets/shims/6b465bd4c49c1c22e4cde3093d21b019153d2dc4a9a1cdef25f66661a479dd59 diff --git a/services/assets/shims/6b675b857fefad9dc4141bc1814d38329e67ecd0f17e4271119fe69065264f4e b/assets/shims/6b675b857fefad9dc4141bc1814d38329e67ecd0f17e4271119fe69065264f4e similarity index 100% rename from services/assets/shims/6b675b857fefad9dc4141bc1814d38329e67ecd0f17e4271119fe69065264f4e rename to assets/shims/6b675b857fefad9dc4141bc1814d38329e67ecd0f17e4271119fe69065264f4e diff --git a/services/assets/shims/6b6fad45d0efd68727289dba37658b41af7b7a609b5ac18c500136e0b9e786ba b/assets/shims/6b6fad45d0efd68727289dba37658b41af7b7a609b5ac18c500136e0b9e786ba similarity index 100% rename from services/assets/shims/6b6fad45d0efd68727289dba37658b41af7b7a609b5ac18c500136e0b9e786ba rename to assets/shims/6b6fad45d0efd68727289dba37658b41af7b7a609b5ac18c500136e0b9e786ba diff --git a/services/assets/shims/6deae9f0900147b5c3b6772a1121a579fcab215029ad1941af6c5041c1d0bb56 b/assets/shims/6deae9f0900147b5c3b6772a1121a579fcab215029ad1941af6c5041c1d0bb56 similarity index 100% rename from services/assets/shims/6deae9f0900147b5c3b6772a1121a579fcab215029ad1941af6c5041c1d0bb56 rename to assets/shims/6deae9f0900147b5c3b6772a1121a579fcab215029ad1941af6c5041c1d0bb56 diff --git a/services/assets/shims/6ed2fd9bc2813e98df6995e17bf14274859ce22f5a2e9bf6b429839c1295d1cc b/assets/shims/6ed2fd9bc2813e98df6995e17bf14274859ce22f5a2e9bf6b429839c1295d1cc similarity index 100% rename from services/assets/shims/6ed2fd9bc2813e98df6995e17bf14274859ce22f5a2e9bf6b429839c1295d1cc rename to assets/shims/6ed2fd9bc2813e98df6995e17bf14274859ce22f5a2e9bf6b429839c1295d1cc diff --git a/services/assets/shims/74d51107c77db2cbad454be2e40c07a82d26da7bef52399c60b9a21fd60c347e b/assets/shims/74d51107c77db2cbad454be2e40c07a82d26da7bef52399c60b9a21fd60c347e similarity index 100% rename from services/assets/shims/74d51107c77db2cbad454be2e40c07a82d26da7bef52399c60b9a21fd60c347e rename to assets/shims/74d51107c77db2cbad454be2e40c07a82d26da7bef52399c60b9a21fd60c347e diff --git a/services/assets/shims/78c9df9f7a10efe67e5a666cf7072ad3730d1e7fd6a39ab4296e1e84c206331f b/assets/shims/78c9df9f7a10efe67e5a666cf7072ad3730d1e7fd6a39ab4296e1e84c206331f similarity index 100% rename from services/assets/shims/78c9df9f7a10efe67e5a666cf7072ad3730d1e7fd6a39ab4296e1e84c206331f rename to assets/shims/78c9df9f7a10efe67e5a666cf7072ad3730d1e7fd6a39ab4296e1e84c206331f diff --git a/services/assets/shims/78eade6ded0eddf4d52e67d017258b1701eff5606157a41fc88a5535d5af6ed3 b/assets/shims/78eade6ded0eddf4d52e67d017258b1701eff5606157a41fc88a5535d5af6ed3 similarity index 100% rename from services/assets/shims/78eade6ded0eddf4d52e67d017258b1701eff5606157a41fc88a5535d5af6ed3 rename to assets/shims/78eade6ded0eddf4d52e67d017258b1701eff5606157a41fc88a5535d5af6ed3 diff --git a/services/assets/shims/78f98103e1afd0671c3b7d98e7c74b20e911397e59d37bddd0c197c41b0b8ed7 b/assets/shims/78f98103e1afd0671c3b7d98e7c74b20e911397e59d37bddd0c197c41b0b8ed7 similarity index 100% rename from services/assets/shims/78f98103e1afd0671c3b7d98e7c74b20e911397e59d37bddd0c197c41b0b8ed7 rename to assets/shims/78f98103e1afd0671c3b7d98e7c74b20e911397e59d37bddd0c197c41b0b8ed7 diff --git a/services/assets/shims/79958066355c53408602c25f23be2b405e77ab05c205df8b709e2fcc1ab15275 b/assets/shims/79958066355c53408602c25f23be2b405e77ab05c205df8b709e2fcc1ab15275 similarity index 100% rename from 
services/assets/shims/79958066355c53408602c25f23be2b405e77ab05c205df8b709e2fcc1ab15275 rename to assets/shims/79958066355c53408602c25f23be2b405e77ab05c205df8b709e2fcc1ab15275 diff --git a/services/assets/shims/7a92ff40540c1226ca897cc70256794e89c416498c22e6605cd4e454a37eb80a b/assets/shims/7a92ff40540c1226ca897cc70256794e89c416498c22e6605cd4e454a37eb80a similarity index 100% rename from services/assets/shims/7a92ff40540c1226ca897cc70256794e89c416498c22e6605cd4e454a37eb80a rename to assets/shims/7a92ff40540c1226ca897cc70256794e89c416498c22e6605cd4e454a37eb80a diff --git a/services/assets/shims/7c41c7363191e26bd695cce906da1c8a59eee7e288f2ef79c8ab2b66062f059b b/assets/shims/7c41c7363191e26bd695cce906da1c8a59eee7e288f2ef79c8ab2b66062f059b similarity index 100% rename from services/assets/shims/7c41c7363191e26bd695cce906da1c8a59eee7e288f2ef79c8ab2b66062f059b rename to assets/shims/7c41c7363191e26bd695cce906da1c8a59eee7e288f2ef79c8ab2b66062f059b diff --git a/services/assets/shims/7f2db2ac187eb3fabd1c33946026dbac76f50fa47237a44bfa1850b691ef4984 b/assets/shims/7f2db2ac187eb3fabd1c33946026dbac76f50fa47237a44bfa1850b691ef4984 similarity index 100% rename from services/assets/shims/7f2db2ac187eb3fabd1c33946026dbac76f50fa47237a44bfa1850b691ef4984 rename to assets/shims/7f2db2ac187eb3fabd1c33946026dbac76f50fa47237a44bfa1850b691ef4984 diff --git a/services/assets/shims/7febbe186a9f9b1ba1ca8da30003618602257f7b1ad6020d6a4125ea0b67a483 b/assets/shims/7febbe186a9f9b1ba1ca8da30003618602257f7b1ad6020d6a4125ea0b67a483 similarity index 100% rename from services/assets/shims/7febbe186a9f9b1ba1ca8da30003618602257f7b1ad6020d6a4125ea0b67a483 rename to assets/shims/7febbe186a9f9b1ba1ca8da30003618602257f7b1ad6020d6a4125ea0b67a483 diff --git a/services/assets/shims/80ed75d2a6861c9d1f52691ab90520c4cf657596ee8736edd85b3d65ca234730 b/assets/shims/80ed75d2a6861c9d1f52691ab90520c4cf657596ee8736edd85b3d65ca234730 similarity index 100% rename from services/assets/shims/80ed75d2a6861c9d1f52691ab90520c4cf657596ee8736edd85b3d65ca234730 rename to assets/shims/80ed75d2a6861c9d1f52691ab90520c4cf657596ee8736edd85b3d65ca234730 diff --git a/services/assets/shims/846899b83399db2b0d0f21155d19d53b82f04602f3396af515654759b7859157 b/assets/shims/846899b83399db2b0d0f21155d19d53b82f04602f3396af515654759b7859157 similarity index 100% rename from services/assets/shims/846899b83399db2b0d0f21155d19d53b82f04602f3396af515654759b7859157 rename to assets/shims/846899b83399db2b0d0f21155d19d53b82f04602f3396af515654759b7859157 diff --git a/services/assets/shims/8509fa695f7299d81e1f1051127d0743061962d11cd58fecf9e3a1663a631a12 b/assets/shims/8509fa695f7299d81e1f1051127d0743061962d11cd58fecf9e3a1663a631a12 similarity index 100% rename from services/assets/shims/8509fa695f7299d81e1f1051127d0743061962d11cd58fecf9e3a1663a631a12 rename to assets/shims/8509fa695f7299d81e1f1051127d0743061962d11cd58fecf9e3a1663a631a12 diff --git a/services/assets/shims/87c85583f2a9983d0f4d064eb6644d0c12992908d5db23a8d3b512006f16e449 b/assets/shims/87c85583f2a9983d0f4d064eb6644d0c12992908d5db23a8d3b512006f16e449 similarity index 100% rename from services/assets/shims/87c85583f2a9983d0f4d064eb6644d0c12992908d5db23a8d3b512006f16e449 rename to assets/shims/87c85583f2a9983d0f4d064eb6644d0c12992908d5db23a8d3b512006f16e449 diff --git a/services/assets/shims/89f2be9c62e35b78a88a7165d54681c2fade406daa367e406d21f9167f9e95c9 b/assets/shims/89f2be9c62e35b78a88a7165d54681c2fade406daa367e406d21f9167f9e95c9 similarity index 100% rename from 
services/assets/shims/89f2be9c62e35b78a88a7165d54681c2fade406daa367e406d21f9167f9e95c9 rename to assets/shims/89f2be9c62e35b78a88a7165d54681c2fade406daa367e406d21f9167f9e95c9 diff --git a/services/assets/shims/8aa23e342059b2e44a560d3bc0a0356a47ec22e3022b2f33612b67ee5cbe8c8c b/assets/shims/8aa23e342059b2e44a560d3bc0a0356a47ec22e3022b2f33612b67ee5cbe8c8c similarity index 100% rename from services/assets/shims/8aa23e342059b2e44a560d3bc0a0356a47ec22e3022b2f33612b67ee5cbe8c8c rename to assets/shims/8aa23e342059b2e44a560d3bc0a0356a47ec22e3022b2f33612b67ee5cbe8c8c diff --git a/services/assets/shims/8b1efc1b0d24347bae5ec1442a495359df60c22c410334f53b88579137934617 b/assets/shims/8b1efc1b0d24347bae5ec1442a495359df60c22c410334f53b88579137934617 similarity index 100% rename from services/assets/shims/8b1efc1b0d24347bae5ec1442a495359df60c22c410334f53b88579137934617 rename to assets/shims/8b1efc1b0d24347bae5ec1442a495359df60c22c410334f53b88579137934617 diff --git a/services/assets/shims/8e8096913a48276d77e519c93e037dc4312befb4953e7f0abdf3fd6989628b61 b/assets/shims/8e8096913a48276d77e519c93e037dc4312befb4953e7f0abdf3fd6989628b61 similarity index 100% rename from services/assets/shims/8e8096913a48276d77e519c93e037dc4312befb4953e7f0abdf3fd6989628b61 rename to assets/shims/8e8096913a48276d77e519c93e037dc4312befb4953e7f0abdf3fd6989628b61 diff --git a/services/assets/shims/921402a6e4fa68b26750bb7f12300792bf53dbfdc972db95e3b03fb57a9fb668 b/assets/shims/921402a6e4fa68b26750bb7f12300792bf53dbfdc972db95e3b03fb57a9fb668 similarity index 100% rename from services/assets/shims/921402a6e4fa68b26750bb7f12300792bf53dbfdc972db95e3b03fb57a9fb668 rename to assets/shims/921402a6e4fa68b26750bb7f12300792bf53dbfdc972db95e3b03fb57a9fb668 diff --git a/services/assets/shims/9297641e25ef4822c2a474a6fe12b645287bdbcf29776312bbb6a7e11b677722 b/assets/shims/9297641e25ef4822c2a474a6fe12b645287bdbcf29776312bbb6a7e11b677722 similarity index 100% rename from services/assets/shims/9297641e25ef4822c2a474a6fe12b645287bdbcf29776312bbb6a7e11b677722 rename to assets/shims/9297641e25ef4822c2a474a6fe12b645287bdbcf29776312bbb6a7e11b677722 diff --git a/services/assets/shims/932999c779be85b6d62174d68a34c2399f1e5c509a285d2ada02c41abbd8668b b/assets/shims/932999c779be85b6d62174d68a34c2399f1e5c509a285d2ada02c41abbd8668b similarity index 100% rename from services/assets/shims/932999c779be85b6d62174d68a34c2399f1e5c509a285d2ada02c41abbd8668b rename to assets/shims/932999c779be85b6d62174d68a34c2399f1e5c509a285d2ada02c41abbd8668b diff --git a/services/assets/shims/97e17ea6912eb145eafe2e81f28e1b0f2756e500a2e3379e48ed784f43601fb5 b/assets/shims/97e17ea6912eb145eafe2e81f28e1b0f2756e500a2e3379e48ed784f43601fb5 similarity index 100% rename from services/assets/shims/97e17ea6912eb145eafe2e81f28e1b0f2756e500a2e3379e48ed784f43601fb5 rename to assets/shims/97e17ea6912eb145eafe2e81f28e1b0f2756e500a2e3379e48ed784f43601fb5 diff --git a/services/assets/shims/994cc6e26358811fae1ffba218abf94c7898600391008a63b7a23b26d4c71f1f b/assets/shims/994cc6e26358811fae1ffba218abf94c7898600391008a63b7a23b26d4c71f1f similarity index 100% rename from services/assets/shims/994cc6e26358811fae1ffba218abf94c7898600391008a63b7a23b26d4c71f1f rename to assets/shims/994cc6e26358811fae1ffba218abf94c7898600391008a63b7a23b26d4c71f1f diff --git a/services/assets/shims/999e61e0057290b9cfe37371b41ea396be1eb6453751d943e34f85018a65414d b/assets/shims/999e61e0057290b9cfe37371b41ea396be1eb6453751d943e34f85018a65414d similarity index 100% rename from 
services/assets/shims/999e61e0057290b9cfe37371b41ea396be1eb6453751d943e34f85018a65414d rename to assets/shims/999e61e0057290b9cfe37371b41ea396be1eb6453751d943e34f85018a65414d diff --git a/services/assets/shims/9cb3d3eeba5ce57cfdafc14b0dc44fe34c68073bbff316e25f42915fb0d14c08 b/assets/shims/9cb3d3eeba5ce57cfdafc14b0dc44fe34c68073bbff316e25f42915fb0d14c08 similarity index 100% rename from services/assets/shims/9cb3d3eeba5ce57cfdafc14b0dc44fe34c68073bbff316e25f42915fb0d14c08 rename to assets/shims/9cb3d3eeba5ce57cfdafc14b0dc44fe34c68073bbff316e25f42915fb0d14c08 diff --git a/services/assets/shims/a08a89303012ad39dd8d86009972393d3b5f2d6225b79d36212476d98f3e32c2 b/assets/shims/a08a89303012ad39dd8d86009972393d3b5f2d6225b79d36212476d98f3e32c2 similarity index 100% rename from services/assets/shims/a08a89303012ad39dd8d86009972393d3b5f2d6225b79d36212476d98f3e32c2 rename to assets/shims/a08a89303012ad39dd8d86009972393d3b5f2d6225b79d36212476d98f3e32c2 diff --git a/services/assets/shims/a0c3fcb65a0629967b3ebaa4a3989960ce1fa30dd6a42dc728c43f851ebae3b5 b/assets/shims/a0c3fcb65a0629967b3ebaa4a3989960ce1fa30dd6a42dc728c43f851ebae3b5 similarity index 100% rename from services/assets/shims/a0c3fcb65a0629967b3ebaa4a3989960ce1fa30dd6a42dc728c43f851ebae3b5 rename to assets/shims/a0c3fcb65a0629967b3ebaa4a3989960ce1fa30dd6a42dc728c43f851ebae3b5 diff --git a/services/assets/shims/a141bb3e1e6581b4cce262342025cf22ca6e44945047385e5a4995096698736b b/assets/shims/a141bb3e1e6581b4cce262342025cf22ca6e44945047385e5a4995096698736b similarity index 100% rename from services/assets/shims/a141bb3e1e6581b4cce262342025cf22ca6e44945047385e5a4995096698736b rename to assets/shims/a141bb3e1e6581b4cce262342025cf22ca6e44945047385e5a4995096698736b diff --git a/services/assets/shims/a4c2e7346e2896b1806ea20b0e07c326c5c33840918e0c128076c70c9d66ec49 b/assets/shims/a4c2e7346e2896b1806ea20b0e07c326c5c33840918e0c128076c70c9d66ec49 similarity index 100% rename from services/assets/shims/a4c2e7346e2896b1806ea20b0e07c326c5c33840918e0c128076c70c9d66ec49 rename to assets/shims/a4c2e7346e2896b1806ea20b0e07c326c5c33840918e0c128076c70c9d66ec49 diff --git a/services/assets/shims/a561886320cdfd7cea0a53e29b6336996d9c1bca04bfb445942dc511bbe82cb8 b/assets/shims/a561886320cdfd7cea0a53e29b6336996d9c1bca04bfb445942dc511bbe82cb8 similarity index 100% rename from services/assets/shims/a561886320cdfd7cea0a53e29b6336996d9c1bca04bfb445942dc511bbe82cb8 rename to assets/shims/a561886320cdfd7cea0a53e29b6336996d9c1bca04bfb445942dc511bbe82cb8 diff --git a/services/assets/shims/a752f8bc75470de884553e34aba1e8356ae7232c7c60615bf9a5d57a4535a968 b/assets/shims/a752f8bc75470de884553e34aba1e8356ae7232c7c60615bf9a5d57a4535a968 similarity index 100% rename from services/assets/shims/a752f8bc75470de884553e34aba1e8356ae7232c7c60615bf9a5d57a4535a968 rename to assets/shims/a752f8bc75470de884553e34aba1e8356ae7232c7c60615bf9a5d57a4535a968 diff --git a/services/assets/shims/a99faba74e40ea2656e3a0aab6cc897b9b2d5718684b7113ccf7f0106d1abe28 b/assets/shims/a99faba74e40ea2656e3a0aab6cc897b9b2d5718684b7113ccf7f0106d1abe28 similarity index 100% rename from services/assets/shims/a99faba74e40ea2656e3a0aab6cc897b9b2d5718684b7113ccf7f0106d1abe28 rename to assets/shims/a99faba74e40ea2656e3a0aab6cc897b9b2d5718684b7113ccf7f0106d1abe28 diff --git a/services/assets/shims/a9f100e7301c5e0373caf4c8458753f30d9b9ad9ffcb10d1e1d0a213ad8b2e5a b/assets/shims/a9f100e7301c5e0373caf4c8458753f30d9b9ad9ffcb10d1e1d0a213ad8b2e5a similarity index 100% rename from 
services/assets/shims/a9f100e7301c5e0373caf4c8458753f30d9b9ad9ffcb10d1e1d0a213ad8b2e5a rename to assets/shims/a9f100e7301c5e0373caf4c8458753f30d9b9ad9ffcb10d1e1d0a213ad8b2e5a diff --git a/services/assets/shims/aa72dbf8e55454fd93014e0badac97fe3fd89dabc9960a7b157b6c56807ef547 b/assets/shims/aa72dbf8e55454fd93014e0badac97fe3fd89dabc9960a7b157b6c56807ef547 similarity index 100% rename from services/assets/shims/aa72dbf8e55454fd93014e0badac97fe3fd89dabc9960a7b157b6c56807ef547 rename to assets/shims/aa72dbf8e55454fd93014e0badac97fe3fd89dabc9960a7b157b6c56807ef547 diff --git a/services/assets/shims/aac16cda84d303e530bd61ea8b38f9094f192769e84b8edd5c177a14fec1800f b/assets/shims/aac16cda84d303e530bd61ea8b38f9094f192769e84b8edd5c177a14fec1800f similarity index 100% rename from services/assets/shims/aac16cda84d303e530bd61ea8b38f9094f192769e84b8edd5c177a14fec1800f rename to assets/shims/aac16cda84d303e530bd61ea8b38f9094f192769e84b8edd5c177a14fec1800f diff --git a/services/assets/shims/abf5107bf10ab1e6563812a0a551058a9487ada3b0a428e76f311ac7a20f35f9 b/assets/shims/abf5107bf10ab1e6563812a0a551058a9487ada3b0a428e76f311ac7a20f35f9 similarity index 100% rename from services/assets/shims/abf5107bf10ab1e6563812a0a551058a9487ada3b0a428e76f311ac7a20f35f9 rename to assets/shims/abf5107bf10ab1e6563812a0a551058a9487ada3b0a428e76f311ac7a20f35f9 diff --git a/services/assets/shims/ac004205c1b050cff366a79f22ad317bcfd3ff8d97af8eda2fb1f2f1120fb1fc b/assets/shims/ac004205c1b050cff366a79f22ad317bcfd3ff8d97af8eda2fb1f2f1120fb1fc similarity index 100% rename from services/assets/shims/ac004205c1b050cff366a79f22ad317bcfd3ff8d97af8eda2fb1f2f1120fb1fc rename to assets/shims/ac004205c1b050cff366a79f22ad317bcfd3ff8d97af8eda2fb1f2f1120fb1fc diff --git a/services/assets/shims/accfa6d33af22137682f53e393327a7467258fa5d7ceff24882f395ac694f50f b/assets/shims/accfa6d33af22137682f53e393327a7467258fa5d7ceff24882f395ac694f50f similarity index 100% rename from services/assets/shims/accfa6d33af22137682f53e393327a7467258fa5d7ceff24882f395ac694f50f rename to assets/shims/accfa6d33af22137682f53e393327a7467258fa5d7ceff24882f395ac694f50f diff --git a/services/assets/shims/ae71d662c0da44590a1e4a3018550f74f2743c2aa845211c869dcc3e7a9cd313 b/assets/shims/ae71d662c0da44590a1e4a3018550f74f2743c2aa845211c869dcc3e7a9cd313 similarity index 100% rename from services/assets/shims/ae71d662c0da44590a1e4a3018550f74f2743c2aa845211c869dcc3e7a9cd313 rename to assets/shims/ae71d662c0da44590a1e4a3018550f74f2743c2aa845211c869dcc3e7a9cd313 diff --git a/services/assets/shims/b26a48eebeb2277a0624e1896b735de830cba89aebc669ba2e1883f3899bb48b b/assets/shims/b26a48eebeb2277a0624e1896b735de830cba89aebc669ba2e1883f3899bb48b similarity index 100% rename from services/assets/shims/b26a48eebeb2277a0624e1896b735de830cba89aebc669ba2e1883f3899bb48b rename to assets/shims/b26a48eebeb2277a0624e1896b735de830cba89aebc669ba2e1883f3899bb48b diff --git a/services/assets/shims/b28b004f4ff7e805db1ae614bd528182ebe36debddae6332d8e945eb97b2ac47 b/assets/shims/b28b004f4ff7e805db1ae614bd528182ebe36debddae6332d8e945eb97b2ac47 similarity index 100% rename from services/assets/shims/b28b004f4ff7e805db1ae614bd528182ebe36debddae6332d8e945eb97b2ac47 rename to assets/shims/b28b004f4ff7e805db1ae614bd528182ebe36debddae6332d8e945eb97b2ac47 diff --git a/services/assets/shims/b5be8e3d8c0775056030b9c90b48d589951b4a7d3ad030881e40c5f489acb75b b/assets/shims/b5be8e3d8c0775056030b9c90b48d589951b4a7d3ad030881e40c5f489acb75b similarity index 100% rename from 
services/assets/shims/b5be8e3d8c0775056030b9c90b48d589951b4a7d3ad030881e40c5f489acb75b rename to assets/shims/b5be8e3d8c0775056030b9c90b48d589951b4a7d3ad030881e40c5f489acb75b diff --git a/services/assets/shims/b6117b0fdd101a4ef34f2a1b8e735a9858c308d2b36018bae9cf6f7f61e76145 b/assets/shims/b6117b0fdd101a4ef34f2a1b8e735a9858c308d2b36018bae9cf6f7f61e76145 similarity index 100% rename from services/assets/shims/b6117b0fdd101a4ef34f2a1b8e735a9858c308d2b36018bae9cf6f7f61e76145 rename to assets/shims/b6117b0fdd101a4ef34f2a1b8e735a9858c308d2b36018bae9cf6f7f61e76145 diff --git a/services/assets/shims/b62f6f7dcd1516d2136653d501760bc1507813dcf4e7b4672353761aff213131 b/assets/shims/b62f6f7dcd1516d2136653d501760bc1507813dcf4e7b4672353761aff213131 similarity index 100% rename from services/assets/shims/b62f6f7dcd1516d2136653d501760bc1507813dcf4e7b4672353761aff213131 rename to assets/shims/b62f6f7dcd1516d2136653d501760bc1507813dcf4e7b4672353761aff213131 diff --git a/services/assets/shims/b634f311e3a13d76c4b94e54897f26427c1f69ffac52f450c27ce9c3b25a9056 b/assets/shims/b634f311e3a13d76c4b94e54897f26427c1f69ffac52f450c27ce9c3b25a9056 similarity index 100% rename from services/assets/shims/b634f311e3a13d76c4b94e54897f26427c1f69ffac52f450c27ce9c3b25a9056 rename to assets/shims/b634f311e3a13d76c4b94e54897f26427c1f69ffac52f450c27ce9c3b25a9056 diff --git a/services/assets/shims/b7a3b5d824331a2132ac15933b6329e261672ab937c7415006936e8a251c8ced b/assets/shims/b7a3b5d824331a2132ac15933b6329e261672ab937c7415006936e8a251c8ced similarity index 100% rename from services/assets/shims/b7a3b5d824331a2132ac15933b6329e261672ab937c7415006936e8a251c8ced rename to assets/shims/b7a3b5d824331a2132ac15933b6329e261672ab937c7415006936e8a251c8ced diff --git a/services/assets/shims/b800a1950799c897961193b81095b075d92b8c01727ce5192097befd3c857513 b/assets/shims/b800a1950799c897961193b81095b075d92b8c01727ce5192097befd3c857513 similarity index 100% rename from services/assets/shims/b800a1950799c897961193b81095b075d92b8c01727ce5192097befd3c857513 rename to assets/shims/b800a1950799c897961193b81095b075d92b8c01727ce5192097befd3c857513 diff --git a/services/assets/shims/b9dde5438931f5b25e07c83f22d0ddca169082c636f932b2f0398df597e7e774 b/assets/shims/b9dde5438931f5b25e07c83f22d0ddca169082c636f932b2f0398df597e7e774 similarity index 100% rename from services/assets/shims/b9dde5438931f5b25e07c83f22d0ddca169082c636f932b2f0398df597e7e774 rename to assets/shims/b9dde5438931f5b25e07c83f22d0ddca169082c636f932b2f0398df597e7e774 diff --git a/services/assets/shims/bac460c6ce3c5ca92e45e13a0a5d335e8130a37002dd20a4c20e471b434ccd61 b/assets/shims/bac460c6ce3c5ca92e45e13a0a5d335e8130a37002dd20a4c20e471b434ccd61 similarity index 100% rename from services/assets/shims/bac460c6ce3c5ca92e45e13a0a5d335e8130a37002dd20a4c20e471b434ccd61 rename to assets/shims/bac460c6ce3c5ca92e45e13a0a5d335e8130a37002dd20a4c20e471b434ccd61 diff --git a/services/assets/shims/bb001e9478210c3d93dbf8c2a48434a4b9cefab6b841694805fedb1e358942cc b/assets/shims/bb001e9478210c3d93dbf8c2a48434a4b9cefab6b841694805fedb1e358942cc similarity index 100% rename from services/assets/shims/bb001e9478210c3d93dbf8c2a48434a4b9cefab6b841694805fedb1e358942cc rename to assets/shims/bb001e9478210c3d93dbf8c2a48434a4b9cefab6b841694805fedb1e358942cc diff --git a/services/assets/shims/c0052d3373d729a1327c0af5baa30f0aa7cc965ebbef3d8a61235c07f61696e7 b/assets/shims/c0052d3373d729a1327c0af5baa30f0aa7cc965ebbef3d8a61235c07f61696e7 similarity index 100% rename from 
services/assets/shims/c0052d3373d729a1327c0af5baa30f0aa7cc965ebbef3d8a61235c07f61696e7 rename to assets/shims/c0052d3373d729a1327c0af5baa30f0aa7cc965ebbef3d8a61235c07f61696e7 diff --git a/services/assets/shims/c63d24b8dc82b97fcb837bffcd8deaee99f8d4904fa14174eb46ab3073f1e286 b/assets/shims/c63d24b8dc82b97fcb837bffcd8deaee99f8d4904fa14174eb46ab3073f1e286 similarity index 100% rename from services/assets/shims/c63d24b8dc82b97fcb837bffcd8deaee99f8d4904fa14174eb46ab3073f1e286 rename to assets/shims/c63d24b8dc82b97fcb837bffcd8deaee99f8d4904fa14174eb46ab3073f1e286 diff --git a/services/assets/shims/c67560ac14d188780faf3f5414aa7d23739c38707c5c9d42e12a7c2e497437a0 b/assets/shims/c67560ac14d188780faf3f5414aa7d23739c38707c5c9d42e12a7c2e497437a0 similarity index 100% rename from services/assets/shims/c67560ac14d188780faf3f5414aa7d23739c38707c5c9d42e12a7c2e497437a0 rename to assets/shims/c67560ac14d188780faf3f5414aa7d23739c38707c5c9d42e12a7c2e497437a0 diff --git a/services/assets/shims/c6764f47d48be60e6ff89d9b8c04fbae1ed83446b0f8fc451af5a2d9c6f31f28 b/assets/shims/c6764f47d48be60e6ff89d9b8c04fbae1ed83446b0f8fc451af5a2d9c6f31f28 similarity index 100% rename from services/assets/shims/c6764f47d48be60e6ff89d9b8c04fbae1ed83446b0f8fc451af5a2d9c6f31f28 rename to assets/shims/c6764f47d48be60e6ff89d9b8c04fbae1ed83446b0f8fc451af5a2d9c6f31f28 diff --git a/services/assets/shims/ca15018eb35551aff571a9a5c97930a15ad6e216727b531574e8e97e624968e5 b/assets/shims/ca15018eb35551aff571a9a5c97930a15ad6e216727b531574e8e97e624968e5 similarity index 100% rename from services/assets/shims/ca15018eb35551aff571a9a5c97930a15ad6e216727b531574e8e97e624968e5 rename to assets/shims/ca15018eb35551aff571a9a5c97930a15ad6e216727b531574e8e97e624968e5 diff --git a/services/assets/shims/cb54aed125cd03336cdc12d650c40eaf496e7780b6e4123bf0c0d134f9eec253 b/assets/shims/cb54aed125cd03336cdc12d650c40eaf496e7780b6e4123bf0c0d134f9eec253 similarity index 100% rename from services/assets/shims/cb54aed125cd03336cdc12d650c40eaf496e7780b6e4123bf0c0d134f9eec253 rename to assets/shims/cb54aed125cd03336cdc12d650c40eaf496e7780b6e4123bf0c0d134f9eec253 diff --git a/services/assets/shims/cb5c70cfd0a8c55fe2c0bfb57afb2e678fb75775a2a413bf4ab87c37b36cfeac b/assets/shims/cb5c70cfd0a8c55fe2c0bfb57afb2e678fb75775a2a413bf4ab87c37b36cfeac similarity index 100% rename from services/assets/shims/cb5c70cfd0a8c55fe2c0bfb57afb2e678fb75775a2a413bf4ab87c37b36cfeac rename to assets/shims/cb5c70cfd0a8c55fe2c0bfb57afb2e678fb75775a2a413bf4ab87c37b36cfeac diff --git a/services/assets/shims/cb69ac7c3ed897958be255adce83a26a40f9362e99e5c5d9f26b17ef80bfdd0a b/assets/shims/cb69ac7c3ed897958be255adce83a26a40f9362e99e5c5d9f26b17ef80bfdd0a similarity index 100% rename from services/assets/shims/cb69ac7c3ed897958be255adce83a26a40f9362e99e5c5d9f26b17ef80bfdd0a rename to assets/shims/cb69ac7c3ed897958be255adce83a26a40f9362e99e5c5d9f26b17ef80bfdd0a diff --git a/services/assets/shims/cc787cd08ecb21f3f723f6d1a2e979e8dc59956cde8f872e6eb3a95b8c998824 b/assets/shims/cc787cd08ecb21f3f723f6d1a2e979e8dc59956cde8f872e6eb3a95b8c998824 similarity index 100% rename from services/assets/shims/cc787cd08ecb21f3f723f6d1a2e979e8dc59956cde8f872e6eb3a95b8c998824 rename to assets/shims/cc787cd08ecb21f3f723f6d1a2e979e8dc59956cde8f872e6eb3a95b8c998824 diff --git a/services/assets/shims/ccedee6c00016e14787cc03e1311b62266f4bb7919cb5bb1329f73cf2bf5e07a b/assets/shims/ccedee6c00016e14787cc03e1311b62266f4bb7919cb5bb1329f73cf2bf5e07a similarity index 100% rename from 
services/assets/shims/ccedee6c00016e14787cc03e1311b62266f4bb7919cb5bb1329f73cf2bf5e07a rename to assets/shims/ccedee6c00016e14787cc03e1311b62266f4bb7919cb5bb1329f73cf2bf5e07a diff --git a/services/assets/shims/cdbcf7cf072fb3dcf0ea43484047927684f61f0b96e12bc6d9ec2a3c2f8ce822 b/assets/shims/cdbcf7cf072fb3dcf0ea43484047927684f61f0b96e12bc6d9ec2a3c2f8ce822 similarity index 100% rename from services/assets/shims/cdbcf7cf072fb3dcf0ea43484047927684f61f0b96e12bc6d9ec2a3c2f8ce822 rename to assets/shims/cdbcf7cf072fb3dcf0ea43484047927684f61f0b96e12bc6d9ec2a3c2f8ce822 diff --git a/services/assets/shims/ce6ed017cb3cccb7b95577810355d5c09324d4cae562b88a271d40a8452c3ea4 b/assets/shims/ce6ed017cb3cccb7b95577810355d5c09324d4cae562b88a271d40a8452c3ea4 similarity index 100% rename from services/assets/shims/ce6ed017cb3cccb7b95577810355d5c09324d4cae562b88a271d40a8452c3ea4 rename to assets/shims/ce6ed017cb3cccb7b95577810355d5c09324d4cae562b88a271d40a8452c3ea4 diff --git a/services/assets/shims/cfbfe7bee6558af6c1ead418ff9ff88cf8fad6a8fd8e14c56cb3fda8888108b8 b/assets/shims/cfbfe7bee6558af6c1ead418ff9ff88cf8fad6a8fd8e14c56cb3fda8888108b8 similarity index 100% rename from services/assets/shims/cfbfe7bee6558af6c1ead418ff9ff88cf8fad6a8fd8e14c56cb3fda8888108b8 rename to assets/shims/cfbfe7bee6558af6c1ead418ff9ff88cf8fad6a8fd8e14c56cb3fda8888108b8 diff --git a/services/assets/shims/d0385029744717bd23ef43ec2549b7082de55f676f17dc7af505a222dce22198 b/assets/shims/d0385029744717bd23ef43ec2549b7082de55f676f17dc7af505a222dce22198 similarity index 100% rename from services/assets/shims/d0385029744717bd23ef43ec2549b7082de55f676f17dc7af505a222dce22198 rename to assets/shims/d0385029744717bd23ef43ec2549b7082de55f676f17dc7af505a222dce22198 diff --git a/services/assets/shims/d0def7908ea07e848cdeca6a757711f099405ffa67113287114da02280d6fc98 b/assets/shims/d0def7908ea07e848cdeca6a757711f099405ffa67113287114da02280d6fc98 similarity index 100% rename from services/assets/shims/d0def7908ea07e848cdeca6a757711f099405ffa67113287114da02280d6fc98 rename to assets/shims/d0def7908ea07e848cdeca6a757711f099405ffa67113287114da02280d6fc98 diff --git a/services/assets/shims/d922b17c61ea580cd1adc60dd5d385db7056b3ad7a0697a688f7c831a6c394b4 b/assets/shims/d922b17c61ea580cd1adc60dd5d385db7056b3ad7a0697a688f7c831a6c394b4 similarity index 100% rename from services/assets/shims/d922b17c61ea580cd1adc60dd5d385db7056b3ad7a0697a688f7c831a6c394b4 rename to assets/shims/d922b17c61ea580cd1adc60dd5d385db7056b3ad7a0697a688f7c831a6c394b4 diff --git a/services/assets/shims/db21ed026968dba58c6e2e60a1a1fbbbd45217904748ec0a30e5e191deef8dd4 b/assets/shims/db21ed026968dba58c6e2e60a1a1fbbbd45217904748ec0a30e5e191deef8dd4 similarity index 100% rename from services/assets/shims/db21ed026968dba58c6e2e60a1a1fbbbd45217904748ec0a30e5e191deef8dd4 rename to assets/shims/db21ed026968dba58c6e2e60a1a1fbbbd45217904748ec0a30e5e191deef8dd4 diff --git a/services/assets/shims/db3bfef1eed0069001002c243ce790357938e9701a7c45852d75b524a68d0e30 b/assets/shims/db3bfef1eed0069001002c243ce790357938e9701a7c45852d75b524a68d0e30 similarity index 100% rename from services/assets/shims/db3bfef1eed0069001002c243ce790357938e9701a7c45852d75b524a68d0e30 rename to assets/shims/db3bfef1eed0069001002c243ce790357938e9701a7c45852d75b524a68d0e30 diff --git a/services/assets/shims/db8ea4f269c006ec82bf3b921f96e13b419d32e44418732d024f7049296b384e b/assets/shims/db8ea4f269c006ec82bf3b921f96e13b419d32e44418732d024f7049296b384e similarity index 100% rename from 
services/assets/shims/db8ea4f269c006ec82bf3b921f96e13b419d32e44418732d024f7049296b384e rename to assets/shims/db8ea4f269c006ec82bf3b921f96e13b419d32e44418732d024f7049296b384e diff --git a/services/assets/shims/dcd66a7d466e3431916ae25bb773cca1569199733b8be4a92430310e3ecb0eed b/assets/shims/dcd66a7d466e3431916ae25bb773cca1569199733b8be4a92430310e3ecb0eed similarity index 100% rename from services/assets/shims/dcd66a7d466e3431916ae25bb773cca1569199733b8be4a92430310e3ecb0eed rename to assets/shims/dcd66a7d466e3431916ae25bb773cca1569199733b8be4a92430310e3ecb0eed diff --git a/services/assets/shims/df8877ee3e1f9ea5f7e67c8138a99e87d6e0f3e549e31b39a6ab84212ae6d084 b/assets/shims/df8877ee3e1f9ea5f7e67c8138a99e87d6e0f3e549e31b39a6ab84212ae6d084 similarity index 100% rename from services/assets/shims/df8877ee3e1f9ea5f7e67c8138a99e87d6e0f3e549e31b39a6ab84212ae6d084 rename to assets/shims/df8877ee3e1f9ea5f7e67c8138a99e87d6e0f3e549e31b39a6ab84212ae6d084 diff --git a/services/assets/shims/e011fbf70660ebcea0e0370b3e1813c9a6f5ee073b6a94dd0e6e0bc192fac121 b/assets/shims/e011fbf70660ebcea0e0370b3e1813c9a6f5ee073b6a94dd0e6e0bc192fac121 similarity index 100% rename from services/assets/shims/e011fbf70660ebcea0e0370b3e1813c9a6f5ee073b6a94dd0e6e0bc192fac121 rename to assets/shims/e011fbf70660ebcea0e0370b3e1813c9a6f5ee073b6a94dd0e6e0bc192fac121 diff --git a/services/assets/shims/e0c661944b99eae04a4ff5823769f4c40ed0766aac25277be6d322cc513fb8d7 b/assets/shims/e0c661944b99eae04a4ff5823769f4c40ed0766aac25277be6d322cc513fb8d7 similarity index 100% rename from services/assets/shims/e0c661944b99eae04a4ff5823769f4c40ed0766aac25277be6d322cc513fb8d7 rename to assets/shims/e0c661944b99eae04a4ff5823769f4c40ed0766aac25277be6d322cc513fb8d7 diff --git a/services/assets/shims/e46d93092657081027e793037d11f72ac528de72942f593bbb9b9edc5a86ec13 b/assets/shims/e46d93092657081027e793037d11f72ac528de72942f593bbb9b9edc5a86ec13 similarity index 100% rename from services/assets/shims/e46d93092657081027e793037d11f72ac528de72942f593bbb9b9edc5a86ec13 rename to assets/shims/e46d93092657081027e793037d11f72ac528de72942f593bbb9b9edc5a86ec13 diff --git a/services/assets/shims/e764164c4d2c7029700c7f43b622880b41c7ed195ebee77238df2ac65066ebd4 b/assets/shims/e764164c4d2c7029700c7f43b622880b41c7ed195ebee77238df2ac65066ebd4 similarity index 100% rename from services/assets/shims/e764164c4d2c7029700c7f43b622880b41c7ed195ebee77238df2ac65066ebd4 rename to assets/shims/e764164c4d2c7029700c7f43b622880b41c7ed195ebee77238df2ac65066ebd4 diff --git a/services/assets/shims/e7ab50479666c5e49d9df1f4ca259f1051de7b316d6f7818122b55fff457d809 b/assets/shims/e7ab50479666c5e49d9df1f4ca259f1051de7b316d6f7818122b55fff457d809 similarity index 100% rename from services/assets/shims/e7ab50479666c5e49d9df1f4ca259f1051de7b316d6f7818122b55fff457d809 rename to assets/shims/e7ab50479666c5e49d9df1f4ca259f1051de7b316d6f7818122b55fff457d809 diff --git a/services/assets/shims/e881b7a80ee78c79ab1414641677a03205f5d90f3b3cb498b9a2405015279fb3 b/assets/shims/e881b7a80ee78c79ab1414641677a03205f5d90f3b3cb498b9a2405015279fb3 similarity index 100% rename from services/assets/shims/e881b7a80ee78c79ab1414641677a03205f5d90f3b3cb498b9a2405015279fb3 rename to assets/shims/e881b7a80ee78c79ab1414641677a03205f5d90f3b3cb498b9a2405015279fb3 diff --git a/services/assets/shims/e96b6dd08106270a8ef3c818f2926fce4de6baba320863c54b28e30c3b8a5e39 b/assets/shims/e96b6dd08106270a8ef3c818f2926fce4de6baba320863c54b28e30c3b8a5e39 similarity index 100% rename from 
services/assets/shims/e96b6dd08106270a8ef3c818f2926fce4de6baba320863c54b28e30c3b8a5e39 rename to assets/shims/e96b6dd08106270a8ef3c818f2926fce4de6baba320863c54b28e30c3b8a5e39 diff --git a/services/assets/shims/e9907a24e0140beb6ce385df6ed6fcd95c2044e17d05700d41f9e2085099926b b/assets/shims/e9907a24e0140beb6ce385df6ed6fcd95c2044e17d05700d41f9e2085099926b similarity index 100% rename from services/assets/shims/e9907a24e0140beb6ce385df6ed6fcd95c2044e17d05700d41f9e2085099926b rename to assets/shims/e9907a24e0140beb6ce385df6ed6fcd95c2044e17d05700d41f9e2085099926b diff --git a/services/assets/shims/ea1f25f04d4eab15600fc9df46385e318eae3543b68968783313424eb92e073d b/assets/shims/ea1f25f04d4eab15600fc9df46385e318eae3543b68968783313424eb92e073d similarity index 100% rename from services/assets/shims/ea1f25f04d4eab15600fc9df46385e318eae3543b68968783313424eb92e073d rename to assets/shims/ea1f25f04d4eab15600fc9df46385e318eae3543b68968783313424eb92e073d diff --git a/services/assets/shims/ea692d8f059afbd68ffea7105bd473f65b7210684f996b9bfec89c2fa19f6d1d b/assets/shims/ea692d8f059afbd68ffea7105bd473f65b7210684f996b9bfec89c2fa19f6d1d similarity index 100% rename from services/assets/shims/ea692d8f059afbd68ffea7105bd473f65b7210684f996b9bfec89c2fa19f6d1d rename to assets/shims/ea692d8f059afbd68ffea7105bd473f65b7210684f996b9bfec89c2fa19f6d1d diff --git a/services/assets/shims/ec71004af47e3de67eb3a6402cd0028badd5959e7c320af8b74c31c3e78b0fad b/assets/shims/ec71004af47e3de67eb3a6402cd0028badd5959e7c320af8b74c31c3e78b0fad similarity index 100% rename from services/assets/shims/ec71004af47e3de67eb3a6402cd0028badd5959e7c320af8b74c31c3e78b0fad rename to assets/shims/ec71004af47e3de67eb3a6402cd0028badd5959e7c320af8b74c31c3e78b0fad diff --git a/services/assets/shims/ed84369d80eacbbafa7dfdab5ebbc882ae3cec75e42bd19697cdc3ed44b1de13 b/assets/shims/ed84369d80eacbbafa7dfdab5ebbc882ae3cec75e42bd19697cdc3ed44b1de13 similarity index 100% rename from services/assets/shims/ed84369d80eacbbafa7dfdab5ebbc882ae3cec75e42bd19697cdc3ed44b1de13 rename to assets/shims/ed84369d80eacbbafa7dfdab5ebbc882ae3cec75e42bd19697cdc3ed44b1de13 diff --git a/services/assets/shims/ee0add7389e054d037034c15d63821e5276dc8721fca15e2d051dd5a84ed3b03 b/assets/shims/ee0add7389e054d037034c15d63821e5276dc8721fca15e2d051dd5a84ed3b03 similarity index 100% rename from services/assets/shims/ee0add7389e054d037034c15d63821e5276dc8721fca15e2d051dd5a84ed3b03 rename to assets/shims/ee0add7389e054d037034c15d63821e5276dc8721fca15e2d051dd5a84ed3b03 diff --git a/services/assets/shims/f06f72ddb711caa7e9a404b560427eb436c6eaa70a8efc7e530ad1e5ca832bad b/assets/shims/f06f72ddb711caa7e9a404b560427eb436c6eaa70a8efc7e530ad1e5ca832bad similarity index 100% rename from services/assets/shims/f06f72ddb711caa7e9a404b560427eb436c6eaa70a8efc7e530ad1e5ca832bad rename to assets/shims/f06f72ddb711caa7e9a404b560427eb436c6eaa70a8efc7e530ad1e5ca832bad diff --git a/services/assets/shims/f0c66d6e8f4c2f57773d3464c0a099a60761175a768aa605c12e0c4c21c93a86 b/assets/shims/f0c66d6e8f4c2f57773d3464c0a099a60761175a768aa605c12e0c4c21c93a86 similarity index 100% rename from services/assets/shims/f0c66d6e8f4c2f57773d3464c0a099a60761175a768aa605c12e0c4c21c93a86 rename to assets/shims/f0c66d6e8f4c2f57773d3464c0a099a60761175a768aa605c12e0c4c21c93a86 diff --git a/services/assets/shims/f1e84014e4777732f70c23d0a40c4c920b0887d1af94706da96375b0ebec51a3 b/assets/shims/f1e84014e4777732f70c23d0a40c4c920b0887d1af94706da96375b0ebec51a3 similarity index 100% rename from 
services/assets/shims/f1e84014e4777732f70c23d0a40c4c920b0887d1af94706da96375b0ebec51a3 rename to assets/shims/f1e84014e4777732f70c23d0a40c4c920b0887d1af94706da96375b0ebec51a3 diff --git a/services/assets/shims/f204549fac6991cc09f8ece7bb5c7b8a8fa06ac9afe47f961dba250030c55900 b/assets/shims/f204549fac6991cc09f8ece7bb5c7b8a8fa06ac9afe47f961dba250030c55900 similarity index 100% rename from services/assets/shims/f204549fac6991cc09f8ece7bb5c7b8a8fa06ac9afe47f961dba250030c55900 rename to assets/shims/f204549fac6991cc09f8ece7bb5c7b8a8fa06ac9afe47f961dba250030c55900 diff --git a/services/assets/shims/f2cb78c36981809fb5c02036e3b4af52412f54b7a4417f6239b2bcca0ce0ace3 b/assets/shims/f2cb78c36981809fb5c02036e3b4af52412f54b7a4417f6239b2bcca0ce0ace3 similarity index 100% rename from services/assets/shims/f2cb78c36981809fb5c02036e3b4af52412f54b7a4417f6239b2bcca0ce0ace3 rename to assets/shims/f2cb78c36981809fb5c02036e3b4af52412f54b7a4417f6239b2bcca0ce0ace3 diff --git a/services/assets/shims/f651ad9f356e46deda244cb8538d7e8169db30f932ba21df6bc1dfd3625d01f8 b/assets/shims/f651ad9f356e46deda244cb8538d7e8169db30f932ba21df6bc1dfd3625d01f8 similarity index 100% rename from services/assets/shims/f651ad9f356e46deda244cb8538d7e8169db30f932ba21df6bc1dfd3625d01f8 rename to assets/shims/f651ad9f356e46deda244cb8538d7e8169db30f932ba21df6bc1dfd3625d01f8 diff --git a/services/assets/shims/fcee7f345df8d4880de5de2a5154a6f1be48b186d12f73d61f396b673d5053fd b/assets/shims/fcee7f345df8d4880de5de2a5154a6f1be48b186d12f73d61f396b673d5053fd similarity index 100% rename from services/assets/shims/fcee7f345df8d4880de5de2a5154a6f1be48b186d12f73d61f396b673d5053fd rename to assets/shims/fcee7f345df8d4880de5de2a5154a6f1be48b186d12f73d61f396b673d5053fd diff --git a/services/assets/sour/data/menus.cfg b/assets/sour/data/menus.cfg similarity index 100% rename from services/assets/sour/data/menus.cfg rename to assets/sour/data/menus.cfg diff --git a/services/assets/sour/packages/base/xmwhub.ogz b/assets/sour/packages/base/xmwhub.ogz similarity index 100% rename from services/assets/sour/packages/base/xmwhub.ogz rename to assets/sour/packages/base/xmwhub.ogz diff --git a/services/assets/sour/packages/textures/downloading.png b/assets/sour/packages/textures/downloading.png similarity index 100% rename from services/assets/sour/packages/textures/downloading.png rename to assets/sour/packages/textures/downloading.png diff --git a/services/assets/split-base.py b/assets/split-base.py similarity index 100% rename from services/assets/split-base.py rename to assets/split-base.py diff --git a/services/assets/split-quad.py b/assets/split-quad.py similarity index 100% rename from services/assets/split-quad.py rename to assets/split-quad.py diff --git a/services/client/.gitignore b/client/.gitignore similarity index 100% rename from services/client/.gitignore rename to client/.gitignore diff --git a/services/client/.prettierignore b/client/.prettierignore similarity index 100% rename from services/client/.prettierignore rename to client/.prettierignore diff --git a/services/client/.prettierrc b/client/.prettierrc similarity index 100% rename from services/client/.prettierrc rename to client/.prettierrc diff --git a/services/client/build b/client/build similarity index 100% rename from services/client/build rename to client/build diff --git a/services/client/definitions/global.d.ts b/client/definitions/global.d.ts similarity index 100% rename from services/client/definitions/global.d.ts rename to client/definitions/global.d.ts diff --git 
a/services/client/definitions/nipple.d.ts b/client/definitions/nipple.d.ts similarity index 100% rename from services/client/definitions/nipple.d.ts rename to client/definitions/nipple.d.ts diff --git a/services/client/package.json b/client/package.json similarity index 100% rename from services/client/package.json rename to client/package.json diff --git a/services/client/scripts/serve b/client/scripts/serve similarity index 100% rename from services/client/scripts/serve rename to client/scripts/serve diff --git a/services/client/src/FileDropper.tsx b/client/src/FileDropper.tsx similarity index 100% rename from services/client/src/FileDropper.tsx rename to client/src/FileDropper.tsx diff --git a/services/client/src/Loading.tsx b/client/src/Loading.tsx similarity index 100% rename from services/client/src/Loading.tsx rename to client/src/Loading.tsx diff --git a/services/client/src/MobileControls.tsx b/client/src/MobileControls.tsx similarity index 100% rename from services/client/src/MobileControls.tsx rename to client/src/MobileControls.tsx diff --git a/services/client/src/assets/hook.ts b/client/src/assets/hook.ts similarity index 100% rename from services/client/src/assets/hook.ts rename to client/src/assets/hook.ts diff --git a/services/client/src/assets/storage.ts b/client/src/assets/storage.ts similarity index 100% rename from services/client/src/assets/storage.ts rename to client/src/assets/storage.ts diff --git a/services/client/src/assets/types.ts b/client/src/assets/types.ts similarity index 100% rename from services/client/src/assets/types.ts rename to client/src/assets/types.ts diff --git a/services/client/src/assets/utils.ts b/client/src/assets/utils.ts similarity index 100% rename from services/client/src/assets/utils.ts rename to client/src/assets/utils.ts diff --git a/services/client/src/assets/worker.ts b/client/src/assets/worker.ts similarity index 100% rename from services/client/src/assets/worker.ts rename to client/src/assets/worker.ts diff --git a/services/client/src/background.png b/client/src/background.png similarity index 100% rename from services/client/src/background.png rename to client/src/background.png diff --git a/services/client/src/config.ts b/client/src/config.ts similarity index 97% rename from services/client/src/config.ts rename to client/src/config.ts index a8de838d5..17c1bb482 100644 --- a/services/client/src/config.ts +++ b/client/src/config.ts @@ -56,7 +56,7 @@ function fillAssetHost(url: string): string { function getInjected(): Maybe { try { - const injected = INJECTED_SOUR_CONFIG + const injected = INJECTED_SOUR_CONFIG.client // This will never run if INJECTED_SOUR_CONFIG is not defined return injected } catch (e) { diff --git a/services/client/src/discord.ts b/client/src/discord.ts similarity index 100% rename from services/client/src/discord.ts rename to client/src/discord.ts diff --git a/services/client/src/favicon.ico b/client/src/favicon.ico similarity index 100% rename from services/client/src/favicon.ico rename to client/src/favicon.ico diff --git a/services/client/src/game.ts b/client/src/game.ts similarity index 100% rename from services/client/src/game.ts rename to client/src/game.ts diff --git a/services/client/src/index.html b/client/src/index.html similarity index 100% rename from services/client/src/index.html rename to client/src/index.html diff --git a/services/client/src/index.tsx b/client/src/index.tsx similarity index 99% rename from services/client/src/index.tsx rename to client/src/index.tsx index 061f6a6ab..23f564fcb 
100644 --- a/services/client/src/index.tsx +++ b/client/src/index.tsx @@ -706,7 +706,7 @@ function App() { log.info('creating private game...') ;(async () => { try { - console.log(`creategame ${preset} ${mode}`); + console.log(`creategame ${preset} ${mode}`) const result = await runCommand(`creategame ${preset} ${mode}`) log.success('created game!') } catch (e) { diff --git a/services/client/src/logging.ts b/client/src/logging.ts similarity index 100% rename from services/client/src/logging.ts rename to client/src/logging.ts diff --git a/services/client/src/names.ts b/client/src/names.ts similarity index 100% rename from services/client/src/names.ts rename to client/src/names.ts diff --git a/services/client/src/protocol.ts b/client/src/protocol.ts similarity index 100% rename from services/client/src/protocol.ts rename to client/src/protocol.ts diff --git a/services/client/src/static/action.jpg b/client/src/static/action.jpg similarity index 100% rename from services/client/src/static/action.jpg rename to client/src/static/action.jpg diff --git a/services/client/src/static/arrow_bw.jpg b/client/src/static/arrow_bw.jpg similarity index 100% rename from services/client/src/static/arrow_bw.jpg rename to client/src/static/arrow_bw.jpg diff --git a/services/client/src/static/arrow_fw.jpg b/client/src/static/arrow_fw.jpg similarity index 100% rename from services/client/src/static/arrow_fw.jpg rename to client/src/static/arrow_fw.jpg diff --git a/services/client/src/static/blank.png b/client/src/static/blank.png similarity index 100% rename from services/client/src/static/blank.png rename to client/src/static/blank.png diff --git a/services/client/src/static/brush_1c.png b/client/src/static/brush_1c.png similarity index 100% rename from services/client/src/static/brush_1c.png rename to client/src/static/brush_1c.png diff --git a/services/client/src/static/brush_21c.png b/client/src/static/brush_21c.png similarity index 100% rename from services/client/src/static/brush_21c.png rename to client/src/static/brush_21c.png diff --git a/services/client/src/static/brush_3s.png b/client/src/static/brush_3s.png similarity index 100% rename from services/client/src/static/brush_3s.png rename to client/src/static/brush_3s.png diff --git a/services/client/src/static/brush_421c.png b/client/src/static/brush_421c.png similarity index 100% rename from services/client/src/static/brush_421c.png rename to client/src/static/brush_421c.png diff --git a/services/client/src/static/brush_5s.png b/client/src/static/brush_5s.png similarity index 100% rename from services/client/src/static/brush_5s.png rename to client/src/static/brush_5s.png diff --git a/services/client/src/static/brush_7s.png b/client/src/static/brush_7s.png similarity index 100% rename from services/client/src/static/brush_7s.png rename to client/src/static/brush_7s.png diff --git a/services/client/src/static/captaincannon.jpg b/client/src/static/captaincannon.jpg similarity index 100% rename from services/client/src/static/captaincannon.jpg rename to client/src/static/captaincannon.jpg diff --git a/services/client/src/static/captaincannon_blue.jpg b/client/src/static/captaincannon_blue.jpg similarity index 100% rename from services/client/src/static/captaincannon_blue.jpg rename to client/src/static/captaincannon_blue.jpg diff --git a/services/client/src/static/captaincannon_red.jpg b/client/src/static/captaincannon_red.jpg similarity index 100% rename from services/client/src/static/captaincannon_red.jpg rename to 
client/src/static/captaincannon_red.jpg diff --git a/services/client/src/static/chat.jpg b/client/src/static/chat.jpg similarity index 100% rename from services/client/src/static/chat.jpg rename to client/src/static/chat.jpg diff --git a/services/client/src/static/checkbox_off.jpg b/client/src/static/checkbox_off.jpg similarity index 100% rename from services/client/src/static/checkbox_off.jpg rename to client/src/static/checkbox_off.jpg diff --git a/services/client/src/static/checkbox_on.jpg b/client/src/static/checkbox_on.jpg similarity index 100% rename from services/client/src/static/checkbox_on.jpg rename to client/src/static/checkbox_on.jpg diff --git a/services/client/src/static/chest.jpg b/client/src/static/chest.jpg similarity index 100% rename from services/client/src/static/chest.jpg rename to client/src/static/chest.jpg diff --git a/services/client/src/static/coins.jpg b/client/src/static/coins.jpg similarity index 100% rename from services/client/src/static/coins.jpg rename to client/src/static/coins.jpg diff --git a/services/client/src/static/cube.jpg b/client/src/static/cube.jpg similarity index 100% rename from services/client/src/static/cube.jpg rename to client/src/static/cube.jpg diff --git a/services/client/src/static/exit.jpg b/client/src/static/exit.jpg similarity index 100% rename from services/client/src/static/exit.jpg rename to client/src/static/exit.jpg diff --git a/services/client/src/static/grenade.png b/client/src/static/grenade.png similarity index 100% rename from services/client/src/static/grenade.png rename to client/src/static/grenade.png diff --git a/services/client/src/static/hand.jpg b/client/src/static/hand.jpg similarity index 100% rename from services/client/src/static/hand.jpg rename to client/src/static/hand.jpg diff --git a/services/client/src/static/info.jpg b/client/src/static/info.jpg similarity index 100% rename from services/client/src/static/info.jpg rename to client/src/static/info.jpg diff --git a/services/client/src/static/inky.jpg b/client/src/static/inky.jpg similarity index 100% rename from services/client/src/static/inky.jpg rename to client/src/static/inky.jpg diff --git a/services/client/src/static/inky_blue.jpg b/client/src/static/inky_blue.jpg similarity index 100% rename from services/client/src/static/inky_blue.jpg rename to client/src/static/inky_blue.jpg diff --git a/services/client/src/static/inky_red.jpg b/client/src/static/inky_red.jpg similarity index 100% rename from services/client/src/static/inky_red.jpg rename to client/src/static/inky_red.jpg diff --git a/services/client/src/static/machinegun.png b/client/src/static/machinegun.png similarity index 100% rename from services/client/src/static/machinegun.png rename to client/src/static/machinegun.png diff --git a/services/client/src/static/map.jpg b/client/src/static/map.jpg similarity index 100% rename from services/client/src/static/map.jpg rename to client/src/static/map.jpg diff --git a/services/client/src/static/menu.jpg b/client/src/static/menu.jpg similarity index 100% rename from services/client/src/static/menu.jpg rename to client/src/static/menu.jpg diff --git a/services/client/src/static/mrfixit.jpg b/client/src/static/mrfixit.jpg similarity index 100% rename from services/client/src/static/mrfixit.jpg rename to client/src/static/mrfixit.jpg diff --git a/services/client/src/static/mrfixit_blue.jpg b/client/src/static/mrfixit_blue.jpg similarity index 100% rename from services/client/src/static/mrfixit_blue.jpg rename to client/src/static/mrfixit_blue.jpg diff 
--git a/services/client/src/static/mrfixit_red.jpg b/client/src/static/mrfixit_red.jpg similarity index 100% rename from services/client/src/static/mrfixit_red.jpg rename to client/src/static/mrfixit_red.jpg diff --git a/services/client/src/static/ogro.jpg b/client/src/static/ogro.jpg similarity index 100% rename from services/client/src/static/ogro.jpg rename to client/src/static/ogro.jpg diff --git a/services/client/src/static/ogro_blue.jpg b/client/src/static/ogro_blue.jpg similarity index 100% rename from services/client/src/static/ogro_blue.jpg rename to client/src/static/ogro_blue.jpg diff --git a/services/client/src/static/ogro_red.jpg b/client/src/static/ogro_red.jpg similarity index 100% rename from services/client/src/static/ogro_red.jpg rename to client/src/static/ogro_red.jpg diff --git a/services/client/src/static/pistol.png b/client/src/static/pistol.png similarity index 100% rename from services/client/src/static/pistol.png rename to client/src/static/pistol.png diff --git a/services/client/src/static/radio_off.jpg b/client/src/static/radio_off.jpg similarity index 100% rename from services/client/src/static/radio_off.jpg rename to client/src/static/radio_off.jpg diff --git a/services/client/src/static/radio_on.jpg b/client/src/static/radio_on.jpg similarity index 100% rename from services/client/src/static/radio_on.jpg rename to client/src/static/radio_on.jpg diff --git a/services/client/src/static/readme.txt b/client/src/static/readme.txt similarity index 100% rename from services/client/src/static/readme.txt rename to client/src/static/readme.txt diff --git a/services/client/src/static/rifle.png b/client/src/static/rifle.png similarity index 100% rename from services/client/src/static/rifle.png rename to client/src/static/rifle.png diff --git a/services/client/src/static/rocket.png b/client/src/static/rocket.png similarity index 100% rename from services/client/src/static/rocket.png rename to client/src/static/rocket.png diff --git a/services/client/src/static/sauer.jpg b/client/src/static/sauer.jpg similarity index 100% rename from services/client/src/static/sauer.jpg rename to client/src/static/sauer.jpg diff --git a/services/client/src/static/saw.png b/client/src/static/saw.png similarity index 100% rename from services/client/src/static/saw.png rename to client/src/static/saw.png diff --git a/services/client/src/static/server.jpg b/client/src/static/server.jpg similarity index 100% rename from services/client/src/static/server.jpg rename to client/src/static/server.jpg diff --git a/services/client/src/static/serverfull.jpg b/client/src/static/serverfull.jpg similarity index 100% rename from services/client/src/static/serverfull.jpg rename to client/src/static/serverfull.jpg diff --git a/services/client/src/static/serverlock.jpg b/client/src/static/serverlock.jpg similarity index 100% rename from services/client/src/static/serverlock.jpg rename to client/src/static/serverlock.jpg diff --git a/services/client/src/static/serverpriv.jpg b/client/src/static/serverpriv.jpg similarity index 100% rename from services/client/src/static/serverpriv.jpg rename to client/src/static/serverpriv.jpg diff --git a/services/client/src/static/serverunk.jpg b/client/src/static/serverunk.jpg similarity index 100% rename from services/client/src/static/serverunk.jpg rename to client/src/static/serverunk.jpg diff --git a/services/client/src/static/shotgun.png b/client/src/static/shotgun.png similarity index 100% rename from services/client/src/static/shotgun.png rename to 
client/src/static/shotgun.png diff --git a/services/client/src/static/snoutx10k.jpg b/client/src/static/snoutx10k.jpg similarity index 100% rename from services/client/src/static/snoutx10k.jpg rename to client/src/static/snoutx10k.jpg diff --git a/services/client/src/static/snoutx10k_blue.jpg b/client/src/static/snoutx10k_blue.jpg similarity index 100% rename from services/client/src/static/snoutx10k_blue.jpg rename to client/src/static/snoutx10k_blue.jpg diff --git a/services/client/src/static/snoutx10k_red.jpg b/client/src/static/snoutx10k_red.jpg similarity index 100% rename from services/client/src/static/snoutx10k_red.jpg rename to client/src/static/snoutx10k_red.jpg diff --git a/services/client/src/static/spectator.jpg b/client/src/static/spectator.jpg similarity index 100% rename from services/client/src/static/spectator.jpg rename to client/src/static/spectator.jpg diff --git a/services/client/src/static/sword.jpg b/client/src/static/sword.jpg similarity index 100% rename from services/client/src/static/sword.jpg rename to client/src/static/sword.jpg diff --git a/services/client/src/types.ts b/client/src/types.ts similarity index 100% rename from services/client/src/types.ts rename to client/src/types.ts diff --git a/services/client/src/unsafe-startup.d.ts b/client/src/unsafe-startup.d.ts similarity index 100% rename from services/client/src/unsafe-startup.d.ts rename to client/src/unsafe-startup.d.ts diff --git a/services/client/src/unsafe-startup.js b/client/src/unsafe-startup.js similarity index 100% rename from services/client/src/unsafe-startup.js rename to client/src/unsafe-startup.js diff --git a/services/client/src/utils.ts b/client/src/utils.ts similarity index 100% rename from services/client/src/utils.ts rename to client/src/utils.ts diff --git a/services/client/tsconfig.json b/client/tsconfig.json similarity index 100% rename from services/client/tsconfig.json rename to client/tsconfig.json diff --git a/services/client/yarn.lock b/client/yarn.lock similarity index 100% rename from services/client/yarn.lock rename to client/yarn.lock diff --git a/services/go/cmd/client/main.go b/cmd/client/main.go similarity index 100% rename from services/go/cmd/client/main.go rename to cmd/client/main.go diff --git a/services/go/cmd/cs/main.go b/cmd/cs/main.go similarity index 100% rename from services/go/cmd/cs/main.go rename to cmd/cs/main.go index 6921ba050..9e62cde6e 100644 --- a/services/go/cmd/cs/main.go +++ b/cmd/cs/main.go @@ -1,8 +1,8 @@ package main import ( - "log" "github.com/cfoust/sour/pkg/cs" + "log" ) func Test(a int) { diff --git a/services/go/cmd/demo/main.go b/cmd/demo/main.go similarity index 100% rename from services/go/cmd/demo/main.go rename to cmd/demo/main.go index 98e1cc29f..dd3377748 100644 --- a/services/go/cmd/demo/main.go +++ b/cmd/demo/main.go @@ -4,8 +4,8 @@ import ( "compress/gzip" "flag" "io" - "os" "log" + "os" "time" I "github.com/cfoust/sour/pkg/game/io" diff --git a/services/go/cmd/ogz/main.go b/cmd/ogz/main.go similarity index 100% rename from services/go/cmd/ogz/main.go rename to cmd/ogz/main.go diff --git a/services/go/svc/cluster/auth/crypto/crypto.cpp b/cmd/server/auth/crypto/crypto.cpp similarity index 100% rename from services/go/svc/cluster/auth/crypto/crypto.cpp rename to cmd/server/auth/crypto/crypto.cpp diff --git a/services/go/svc/cluster/auth/crypto/crypto.go b/cmd/server/auth/crypto/crypto.go similarity index 100% rename from services/go/svc/cluster/auth/crypto/crypto.go rename to cmd/server/auth/crypto/crypto.go diff --git 
a/services/go/svc/cluster/auth/crypto/crypto.h b/cmd/server/auth/crypto/crypto.h similarity index 100% rename from services/go/svc/cluster/auth/crypto/crypto.h rename to cmd/server/auth/crypto/crypto.h diff --git a/services/go/svc/cluster/auth/crypto/crypto.swigcxx b/cmd/server/auth/crypto/crypto.swigcxx similarity index 100% rename from services/go/svc/cluster/auth/crypto/crypto.swigcxx rename to cmd/server/auth/crypto/crypto.swigcxx diff --git a/services/go/svc/cluster/auth/crypto/tools.h b/cmd/server/auth/crypto/tools.h similarity index 100% rename from services/go/svc/cluster/auth/crypto/tools.h rename to cmd/server/auth/crypto/tools.h diff --git a/services/go/svc/cluster/auth/module.go b/cmd/server/auth/module.go similarity index 98% rename from services/go/svc/cluster/auth/module.go rename to cmd/server/auth/module.go index aefe79284..29ea10ecd 100644 --- a/services/go/svc/cluster/auth/module.go +++ b/cmd/server/auth/module.go @@ -14,9 +14,9 @@ import ( "time" "unsafe" - "github.com/cfoust/sour/svc/cluster/auth/crypto" - "github.com/cfoust/sour/svc/cluster/config" - "github.com/cfoust/sour/svc/cluster/state" + "github.com/cfoust/sour/cmd/server/auth/crypto" + "github.com/cfoust/sour/cmd/server/config" + "github.com/cfoust/sour/cmd/server/state" "gorm.io/gorm" ) diff --git a/services/go/svc/cluster/config/module.go b/cmd/server/config/module.go similarity index 93% rename from services/go/svc/cluster/config/module.go rename to cmd/server/config/module.go index aa98b16db..6f8e89fa4 100644 --- a/services/go/svc/cluster/config/module.go +++ b/cmd/server/config/module.go @@ -2,9 +2,7 @@ package config import ( "encoding/json" - "errors" "fmt" - "os" "github.com/cfoust/sour/pkg/server" ) @@ -190,14 +188,9 @@ type Config struct { Discord DiscordSettings } -func GetSourConfig() (*Config, error) { - configJson, ok := os.LookupEnv("SOUR_CONFIG") - if !ok { - return nil, errors.New("SOUR_CONFIG not defined") - } - +func GetSourConfig(data []byte) (*Config, error) { var config Config - err := json.Unmarshal([]byte(configJson), &config) + err := json.Unmarshal(data, &config) if err != nil { return nil, err } diff --git a/services/go/svc/cluster/ingress/enet.go b/cmd/server/ingress/enet.go similarity index 99% rename from services/go/svc/cluster/ingress/enet.go rename to cmd/server/ingress/enet.go index bb3afaeb9..4321c8cd2 100644 --- a/services/go/svc/cluster/ingress/enet.go +++ b/cmd/server/ingress/enet.go @@ -6,10 +6,10 @@ import ( "sync" "time" + "github.com/cfoust/sour/cmd/server/state" "github.com/cfoust/sour/pkg/enet" "github.com/cfoust/sour/pkg/game/io" "github.com/cfoust/sour/pkg/utils" - "github.com/cfoust/sour/svc/cluster/state" ) type PacketACK struct { diff --git a/services/go/svc/cluster/ingress/module.go b/cmd/server/ingress/module.go similarity index 97% rename from services/go/svc/cluster/ingress/module.go rename to cmd/server/ingress/module.go index a687972e6..f94122aac 100644 --- a/services/go/svc/cluster/ingress/module.go +++ b/cmd/server/ingress/module.go @@ -1,9 +1,9 @@ package ingress import ( + "github.com/cfoust/sour/cmd/server/state" "github.com/cfoust/sour/pkg/game/io" "github.com/cfoust/sour/pkg/utils" - "github.com/cfoust/sour/svc/cluster/state" ) // A unique identifier for this client for the lifetime of their session. 
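For reference on the config/module.go hunk above: GetSourConfig no longer reads the SOUR_CONFIG environment variable itself and instead unmarshals whatever JSON bytes the caller hands it. A minimal sketch of the resulting call pattern, using only names that appear elsewhere in this diff (it mirrors the cmd/server/main.go hunk further down and is illustrative rather than a verbatim excerpt):

package main

import (
	"os"

	"github.com/cfoust/sour/cmd/server/config"
	"github.com/rs/zerolog/log"
)

func main() {
	// The environment lookup now lives with the caller; GetSourConfig
	// only parses the JSON it is given.
	configJson, ok := os.LookupEnv("SOUR_CONFIG")
	if !ok {
		log.Fatal().Msg("SOUR_CONFIG not defined")
	}

	sourConfig, err := config.GetSourConfig([]byte(configJson))
	if err != nil {
		log.Fatal().Err(err).Msg("failed to load sour configuration")
	}

	// The server then pulls its cluster settings off the parsed config,
	// as in the main.go hunk below.
	_ = sourConfig.Cluster
}

The same raw JSON string is later reused for the embedded static site (static.Site(configJson) in the main.go hunk), which is why the lookup moved out of the config package.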
diff --git a/services/go/svc/cluster/ingress/ws.go b/cmd/server/ingress/ws.go similarity index 91% rename from services/go/svc/cluster/ingress/ws.go rename to cmd/server/ingress/ws.go index 3df0491c3..acfb227ee 100644 --- a/services/go/svc/cluster/ingress/ws.go +++ b/cmd/server/ingress/ws.go @@ -8,11 +8,11 @@ import ( "sync" "time" + "github.com/cfoust/sour/cmd/server/auth" + "github.com/cfoust/sour/cmd/server/state" + "github.com/cfoust/sour/cmd/server/watcher" "github.com/cfoust/sour/pkg/game/io" "github.com/cfoust/sour/pkg/utils" - "github.com/cfoust/sour/svc/cluster/auth" - "github.com/cfoust/sour/svc/cluster/state" - "github.com/cfoust/sour/svc/cluster/watcher" "github.com/fxamacker/cbor/v2" "github.com/mileusna/useragent" @@ -235,15 +235,13 @@ type WSIngress struct { mutex sync.Mutex serverWatcher *watcher.Watcher httpServer *http.Server - auth *auth.DiscordService } -func NewWSIngress(newClients chan Connection, auth *auth.DiscordService) *WSIngress { +func NewWSIngress(newClients chan Connection) *WSIngress { return &WSIngress{ newClients: newClients, clients: make(map[*WSClient]struct{}), serverWatcher: watcher.NewWatcher(), - auth: auth, } } @@ -266,38 +264,7 @@ func (server *WSIngress) RemoveClient(client *WSClient) { } func (server *WSIngress) HandleLogin(ctx context.Context, client *WSClient, code string) { - if server.auth == nil || code == "" { - client.authentication <- nil - return - } - - user, err := server.auth.AuthenticateCode(ctx, code) - - if err != nil { - log.Error().Err(err).Msg("user failed to log in") - response := DiscordCodeMessage{ - Op: AuthFailedOp, - Code: code, - } - bytes, _ := cbor.Marshal(response) - client.send <- bytes - return - } - - response := AuthSucceededMessage{ - Op: AuthSucceededOp, - Code: code, - User: auth.DiscordUser{ - Id: user.UUID, - Username: user.Username, - Discriminator: user.Discriminator, - Avatar: user.Avatar, - }, - PrivateKey: user.PrivateKey, - } - bytes, _ := cbor.Marshal(response) - client.send <- bytes - client.authentication <- user + client.authentication <- nil } func (server *WSIngress) HandleClient(ctx context.Context, c *websocket.Conn, host string, deviceType string) error { @@ -459,7 +426,7 @@ func (server *WSIngress) HandleClient(ctx context.Context, c *websocket.Conn, ho func (server *WSIngress) ServeHTTP(w http.ResponseWriter, r *http.Request) { // Ignore the request, this sometimes happens - if r.URL.Path != "/" { + if r.URL.Path != "/ws/" { return } diff --git a/services/go/svc/cluster/main.go b/cmd/server/main.go similarity index 55% rename from services/go/svc/cluster/main.go rename to cmd/server/main.go index 135c64a5c..cc9f60663 100644 --- a/services/go/svc/cluster/main.go +++ b/cmd/server/main.go @@ -7,19 +7,14 @@ import ( "net/http" "os" "os/signal" - "path/filepath" - "runtime" - "runtime/pprof" "time" + "github.com/cfoust/sour/cmd/server/config" + "github.com/cfoust/sour/cmd/server/ingress" + "github.com/cfoust/sour/cmd/server/servers" + "github.com/cfoust/sour/cmd/server/service" + "github.com/cfoust/sour/cmd/server/static" "github.com/cfoust/sour/pkg/assets" - "github.com/cfoust/sour/svc/cluster/auth" - "github.com/cfoust/sour/svc/cluster/config" - "github.com/cfoust/sour/svc/cluster/ingress" - "github.com/cfoust/sour/svc/cluster/servers" - "github.com/cfoust/sour/svc/cluster/service" - "github.com/cfoust/sour/svc/cluster/state" - "github.com/cfoust/sour/svc/cluster/stores" "github.com/rs/zerolog" "github.com/rs/zerolog/log" @@ -27,83 +22,35 @@ import ( func main() { debug := flag.Bool("debug", 
false, "Whether to enable debug logging.") - cpuProfile := flag.String("cpu", "", "Write cpu profile to `file`.") - memProfile := flag.String("memory", "", "Write memory profile to `file`.") flag.Parse() - sourConfig, err := config.GetSourConfig() + configJson, ok := os.LookupEnv("SOUR_CONFIG") + if !ok { + log.Fatal().Msg("SOUR_CONFIG not defined") + } + + sourConfig, err := config.GetSourConfig([]byte(configJson)) if err != nil { log.Fatal().Err(err).Msg("failed to load sour configuration, please specify one with the SOUR_CONFIG environment variable") } clusterConfig := sourConfig.Cluster - db, err := state.InitDB(clusterConfig.DBPath) - if err != nil { - log.Fatal().Err(err).Msg("failed to initialize sqlite") - } - consoleWriter := zerolog.ConsoleWriter{Out: os.Stdout, TimeFormat: time.RFC3339} log.Logger = log.Output(consoleWriter) - if clusterConfig.LogDirectory != "" { - logDir := clusterConfig.LogDirectory - err = os.MkdirAll(logDir, 0755) - if err != nil { - log.Fatal().Err(err).Msgf("failed to make log dir: %s", logDir) - } - - path := filepath.Join( - logDir, - fmt.Sprintf( - "%s.json", - time.Now().Format("2006.01.02.03.04.05"), - ), - ) - - logFile, err := os.Create(path) - if err != nil { - log.Fatal().Err(err).Msgf("failed to make log file: %s", path) - } - defer logFile.Close() - - log.Logger = log.Output(zerolog.MultiLevelWriter(consoleWriter, logFile)) - log.Info().Msgf("logging to %s", path) - } - - if *cpuProfile != "" { - f, err := os.Create(*cpuProfile) - if err != nil { - log.Fatal().Err(err).Msg("could not create CPU profile") - } - defer f.Close() // error handling omitted for example - if err := pprof.StartCPUProfile(f); err != nil { - log.Fatal().Err(err).Msg("could not start CPU profile") - } - defer pprof.StopCPUProfile() - } - zerolog.SetGlobalLevel(zerolog.InfoLevel) if *debug { zerolog.SetGlobalLevel(zerolog.DebugLevel) log.Warn().Msg("debug logging enabled") } - if clusterConfig.LogSessions { - log.Info().Msg("storing user sessions") - } + clusterConfig.LogSessions = false ctx, cancel := context.WithCancel(context.Background()) defer cancel() - state := state.NewStateService(sourConfig.Redis) - - stores, err := stores.New(db, sourConfig.AssetStores) - if err != nil { - log.Fatal().Err(err).Msg("asset stores failed to initialize") - } - - var cache assets.Store = assets.NewRedisCache(state.Client, time.Hour) + var cache assets.Store cacheDir := clusterConfig.CacheDirectory if cacheDir != "" { err = os.MkdirAll(cacheDir, 0755) @@ -113,7 +60,16 @@ func main() { cache = assets.FSStore(cacheDir) } - assetFetcher, err := assets.NewAssetFetcher(ctx, cache, clusterConfig.Assets, true) + if cache == nil { + log.Fatal().Msg("no cache directory specified") + } + + assetFetcher, err := assets.NewAssetFetcher( + ctx, + cache, + clusterConfig.Assets, + true, + ) if err != nil { log.Fatal().Err(err).Msg("asset fetcher failed to initialize") } @@ -125,32 +81,21 @@ func main() { numCfgMaps++ } } - log.Info().Msgf("loaded %d maps (%d no .cfg)", len(maps), len(maps) - numCfgMaps) - - go assetFetcher.PollDownloads(ctx) - var discord *auth.DiscordService = nil - discordSettings := sourConfig.Discord - if discordSettings.Enabled { - log.Info().Msg("Discord authentication enabled") - discord = auth.NewDiscordService( - discordSettings, - state, - db, - ) + if len(maps) == 0 { + log.Fatal().Msg("no maps found") } + log.Info().Msgf("loaded %d maps (%d no .cfg)", len(maps), len(maps)-numCfgMaps) + + go assetFetcher.PollDownloads(ctx) + serverManager := 
servers.NewServerManager(assetFetcher, clusterConfig.ServerDescription, clusterConfig.Presets) cluster := service.NewCluster( ctx, serverManager, assetFetcher, clusterConfig, - sourConfig.Discord.Domain, - discord, - state.Client, - db, - stores, ) err = serverManager.Start() @@ -160,7 +105,7 @@ func main() { newConnections := make(chan ingress.Connection) - wsIngress := ingress.NewWSIngress(newConnections, discord) + wsIngress := ingress.NewWSIngress(newConnections) enet := make([]*ingress.ENetIngress, 0) infoServices := make([]*servers.ServerInfoService, 0) @@ -201,11 +146,23 @@ func main() { go cluster.PollDuels(ctx) go wsIngress.StartWatcher(ctx) + staticSite, err := static.Site(configJson) + if err != nil { + log.Fatal().Err(err).Msg("failed to load site data") + } + errc := make(chan error, 1) go func() { mux := http.NewServeMux() - mux.Handle("/", wsIngress) + mux.Handle("/", staticSite) + mux.Handle("/ws/", wsIngress) mux.Handle("/api/", cluster) + mux.Handle("/assets/", http.StripPrefix( + "/assets/", + http.FileServer( + http.Dir("../client/dist/assets"), + ), + )) errc <- http.ListenAndServe( fmt.Sprintf("0.0.0.0:%d", clusterConfig.Ingress.Web.Port), @@ -224,18 +181,6 @@ func main() { log.Printf("terminating: %v", sig) } - if *memProfile != "" { - f, err := os.Create(*memProfile) - if err != nil { - log.Fatal().Err(err).Msg("could not create memory profile") - } - defer f.Close() // error handling omitted for example - runtime.GC() // get up-to-date statistics - if err := pprof.WriteHeapProfile(f); err != nil { - log.Fatal().Err(err).Msg("could not write memory profile") - } - } - for _, enetIngress := range enet { enetIngress.Shutdown() } diff --git a/services/go/svc/cluster/servers/gameserver.go b/cmd/server/servers/gameserver.go similarity index 100% rename from services/go/svc/cluster/servers/gameserver.go rename to cmd/server/servers/gameserver.go diff --git a/services/go/svc/cluster/servers/module.go b/cmd/server/servers/module.go similarity index 98% rename from services/go/svc/cluster/servers/module.go rename to cmd/server/servers/module.go index e693bf88d..ed046ff52 100644 --- a/services/go/svc/cluster/servers/module.go +++ b/cmd/server/servers/module.go @@ -11,13 +11,13 @@ import ( "sync" "time" + "github.com/cfoust/sour/cmd/server/config" + "github.com/cfoust/sour/cmd/server/ingress" "github.com/cfoust/sour/pkg/assets" C "github.com/cfoust/sour/pkg/game/constants" P "github.com/cfoust/sour/pkg/game/protocol" "github.com/cfoust/sour/pkg/maps" "github.com/cfoust/sour/pkg/server" - "github.com/cfoust/sour/svc/cluster/config" - "github.com/cfoust/sour/svc/cluster/ingress" "github.com/repeale/fp-go" "github.com/repeale/fp-go/option" diff --git a/services/go/svc/cluster/servers/serverinfo.go b/cmd/server/servers/serverinfo.go similarity index 100% rename from services/go/svc/cluster/servers/serverinfo.go rename to cmd/server/servers/serverinfo.go diff --git a/services/go/svc/cluster/service/api.go b/cmd/server/service/api.go similarity index 100% rename from services/go/svc/cluster/service/api.go rename to cmd/server/service/api.go diff --git a/services/go/svc/cluster/service/auth.go b/cmd/server/service/auth.go similarity index 97% rename from services/go/svc/cluster/service/auth.go rename to cmd/server/service/auth.go index 814a8464c..cc6b6ae86 100644 --- a/services/go/svc/cluster/service/auth.go +++ b/cmd/server/service/auth.go @@ -5,10 +5,10 @@ import ( "fmt" "time" + "github.com/cfoust/sour/cmd/server/auth" + "github.com/cfoust/sour/cmd/server/state" 
"github.com/cfoust/sour/pkg/game" P "github.com/cfoust/sour/pkg/game/protocol" - "github.com/cfoust/sour/svc/cluster/auth" - "github.com/cfoust/sour/svc/cluster/state" ) func (c *Cluster) DoAuthChallenge(ctx context.Context, user *User, id string) (*state.User, error) { diff --git a/services/go/svc/cluster/service/commands.go b/cmd/server/service/commands.go similarity index 98% rename from services/go/svc/cluster/service/commands.go rename to cmd/server/service/commands.go index 151815cde..3a2e40f2f 100644 --- a/services/go/svc/cluster/service/commands.go +++ b/cmd/server/service/commands.go @@ -7,13 +7,13 @@ import ( "strings" "time" + "github.com/cfoust/sour/cmd/server/ingress" + "github.com/cfoust/sour/cmd/server/servers" + "github.com/cfoust/sour/cmd/server/verse" "github.com/cfoust/sour/pkg/game" "github.com/cfoust/sour/pkg/game/commands" "github.com/cfoust/sour/pkg/game/constants" "github.com/cfoust/sour/pkg/server/protocol/gamemode" - "github.com/cfoust/sour/svc/cluster/ingress" - "github.com/cfoust/sour/svc/cluster/servers" - "github.com/cfoust/sour/svc/cluster/verse" "github.com/repeale/fp-go/option" "github.com/rs/zerolog/log" diff --git a/services/go/svc/cluster/service/cs.go b/cmd/server/service/cs.go similarity index 99% rename from services/go/svc/cluster/service/cs.go rename to cmd/server/service/cs.go index f6de7f233..5dc937e43 100644 --- a/services/go/svc/cluster/service/cs.go +++ b/cmd/server/service/cs.go @@ -7,9 +7,9 @@ import ( "time" "github.com/cfoust/sour/pkg/game" - "github.com/cfoust/sour/pkg/utils" C "github.com/cfoust/sour/pkg/game/constants" P "github.com/cfoust/sour/pkg/game/protocol" + "github.com/cfoust/sour/pkg/utils" ) //go:embed purgatory.ogz @@ -25,7 +25,7 @@ func sendServerInfo(ctx context.Context, u *User, domain string) (P.Connect, err info, ) if err != nil { - return P.Connect{}, err + return P.Connect{}, err } connect := msg.(P.Connect) diff --git a/services/go/svc/cluster/service/elo.go b/cmd/server/service/elo.go similarity index 96% rename from services/go/svc/cluster/service/elo.go rename to cmd/server/service/elo.go index afe7d7136..785f78bf0 100644 --- a/services/go/svc/cluster/service/elo.go +++ b/cmd/server/service/elo.go @@ -4,8 +4,8 @@ import ( "context" "sync" - "github.com/cfoust/sour/svc/cluster/config" - "github.com/cfoust/sour/svc/cluster/state" + "github.com/cfoust/sour/cmd/server/config" + "github.com/cfoust/sour/cmd/server/state" "gorm.io/gorm" ) diff --git a/services/go/svc/cluster/service/events.go b/cmd/server/service/events.go similarity index 99% rename from services/go/svc/cluster/service/events.go rename to cmd/server/service/events.go index ad3f82e91..e91fc9f47 100644 --- a/services/go/svc/cluster/service/events.go +++ b/cmd/server/service/events.go @@ -6,14 +6,14 @@ import ( "strings" "time" + "github.com/cfoust/sour/cmd/server/ingress" + "github.com/cfoust/sour/cmd/server/servers" "github.com/cfoust/sour/pkg/chanlock" "github.com/cfoust/sour/pkg/game" "github.com/cfoust/sour/pkg/game/constants" "github.com/cfoust/sour/pkg/game/io" P "github.com/cfoust/sour/pkg/game/protocol" S "github.com/cfoust/sour/pkg/server" - "github.com/cfoust/sour/svc/cluster/ingress" - "github.com/cfoust/sour/svc/cluster/servers" "github.com/rs/zerolog/log" ) diff --git a/services/go/svc/cluster/service/home.go b/cmd/server/service/home.go similarity index 100% rename from services/go/svc/cluster/service/home.go rename to cmd/server/service/home.go diff --git a/services/go/svc/cluster/service/maps.go b/cmd/server/service/maps.go similarity index 
100% rename from services/go/svc/cluster/service/maps.go rename to cmd/server/service/maps.go diff --git a/services/go/svc/cluster/service/matchmaking.go b/cmd/server/service/matchmaking.go similarity index 98% rename from services/go/svc/cluster/service/matchmaking.go rename to cmd/server/service/matchmaking.go index a61085a2b..36258cecf 100644 --- a/services/go/svc/cluster/service/matchmaking.go +++ b/cmd/server/service/matchmaking.go @@ -6,12 +6,12 @@ import ( "sync" "time" + "github.com/cfoust/sour/cmd/server/config" + "github.com/cfoust/sour/cmd/server/ingress" + "github.com/cfoust/sour/cmd/server/servers" "github.com/cfoust/sour/pkg/game" P "github.com/cfoust/sour/pkg/game/protocol" "github.com/cfoust/sour/pkg/mmr" - "github.com/cfoust/sour/svc/cluster/config" - "github.com/cfoust/sour/svc/cluster/ingress" - "github.com/cfoust/sour/svc/cluster/servers" "github.com/repeale/fp-go/option" "github.com/rs/zerolog" @@ -737,9 +737,6 @@ func (server *Cluster) PollDuels(ctx context.Context) { loserELO.Losses++ } - winner.SaveELOState(ctx) - loser.SaveELOState(ctx) - if result.IsDraw { message := "the duel ended in a draw, your rating is unchanged" winner.Message(message) diff --git a/services/go/svc/cluster/service/module.go b/cmd/server/service/module.go similarity index 88% rename from services/go/svc/cluster/service/module.go rename to cmd/server/service/module.go index 78285fb31..c3d5149c0 100644 --- a/services/go/svc/cluster/service/module.go +++ b/cmd/server/service/module.go @@ -5,17 +5,16 @@ import ( "sync" "time" + "github.com/cfoust/sour/cmd/server/auth" + "github.com/cfoust/sour/cmd/server/config" + "github.com/cfoust/sour/cmd/server/ingress" + "github.com/cfoust/sour/cmd/server/servers" + "github.com/cfoust/sour/cmd/server/verse" "github.com/cfoust/sour/pkg/assets" "github.com/cfoust/sour/pkg/chanlock" "github.com/cfoust/sour/pkg/game" "github.com/cfoust/sour/pkg/game/commands" P "github.com/cfoust/sour/pkg/game/protocol" - "github.com/cfoust/sour/svc/cluster/auth" - "github.com/cfoust/sour/svc/cluster/config" - "github.com/cfoust/sour/svc/cluster/ingress" - "github.com/cfoust/sour/svc/cluster/servers" - "github.com/cfoust/sour/svc/cluster/stores" - "github.com/cfoust/sour/svc/cluster/verse" "github.com/go-redis/redis/v9" "github.com/rs/zerolog/log" @@ -60,18 +59,11 @@ func NewCluster( serverManager *servers.ServerManager, maps *assets.AssetFetcher, settings config.ClusterSettings, - authDomain string, - auth *auth.DiscordService, - redis *redis.Client, - db *gorm.DB, - store *stores.AssetStorage, ) *Cluster { - v := verse.NewVerse(db, store) server := &Cluster{ - Users: NewUserOrchestrator(db, v, settings.Matchmaking.Duel), + Users: NewUserOrchestrator(settings.Matchmaking.Duel), serverCtx: ctx, settings: settings, - authDomain: authDomain, hostServers: make(map[string]*servers.GameServer), commands: commands.NewCommandGroup[*User]("general", game.ColorOrange), lastCreate: make(map[string]time.Time), @@ -79,11 +71,7 @@ func NewCluster( serverMessage: make(chan []byte, 1), servers: serverManager, started: time.Now(), - auth: auth, - redis: redis, - db: db, - verse: v, - spaces: verse.NewSpaceManager(v, serverManager, maps), + spaces: verse.NewSpaceManager(serverManager, maps), assets: maps, } diff --git a/services/go/svc/cluster/service/purgatory.ogz b/cmd/server/service/purgatory.ogz similarity index 100% rename from services/go/svc/cluster/service/purgatory.ogz rename to cmd/server/service/purgatory.ogz diff --git a/services/go/svc/cluster/service/sessions.go 
b/cmd/server/service/sessions.go similarity index 100% rename from services/go/svc/cluster/service/sessions.go rename to cmd/server/service/sessions.go diff --git a/services/go/svc/cluster/service/users.go b/cmd/server/service/users.go similarity index 84% rename from services/go/svc/cluster/service/users.go rename to cmd/server/service/users.go index ebcbc5261..362e3bce2 100644 --- a/services/go/svc/cluster/service/users.go +++ b/cmd/server/service/users.go @@ -14,11 +14,11 @@ import ( "github.com/cfoust/sour/pkg/server" "github.com/cfoust/sour/pkg/utils" - "github.com/cfoust/sour/svc/cluster/config" - "github.com/cfoust/sour/svc/cluster/ingress" - "github.com/cfoust/sour/svc/cluster/servers" - "github.com/cfoust/sour/svc/cluster/state" - "github.com/cfoust/sour/svc/cluster/verse" + "github.com/cfoust/sour/cmd/server/config" + "github.com/cfoust/sour/cmd/server/ingress" + "github.com/cfoust/sour/cmd/server/servers" + "github.com/cfoust/sour/cmd/server/state" + "github.com/cfoust/sour/cmd/server/verse" "github.com/rs/zerolog" "github.com/rs/zerolog/log" @@ -238,7 +238,7 @@ func (u *User) ResponseTimeout(ctx context.Context, timeout time.Duration, code } func (u *User) Response(ctx context.Context, code P.MessageCode, messages ...P.Message) (P.Message, error) { - return u.ResponseTimeout(ctx, 5 * time.Second, code, messages...) + return u.ResponseTimeout(ctx, 5*time.Second, code, messages...) } func (c *User) ReceiveToMessages() <-chan TrackedPacket { @@ -288,49 +288,7 @@ func (u *User) GetSpace() *verse.SpaceInstance { } func (u *User) GetHomeSpace(ctx context.Context) (*state.Space, error) { - auth := u.GetAuth() - if auth == nil { - return nil, fmt.Errorf("user is not logged in") - } - - var space state.Space - query := state.Space{} - query.ID = auth.HomeID - err := u.o.db.WithContext(ctx).Where(query).First(&space).Error - if err != nil && err != gorm.ErrRecordNotFound { - return nil, err - } - - // We only create home spaces on demand - if err == gorm.ErrRecordNotFound { - userSpace, err := u.o.verse.NewSpace(ctx, auth) - if err != nil { - return nil, err - } - - err = userSpace.SetDescription( - ctx, - "Home", - ) - if err != nil { - return nil, err - } - - space, err := userSpace.GetSpace(ctx) - if err != nil { - return nil, err - } - - auth.HomeID = space.ID - err = u.o.db.WithContext(ctx).Save(&auth).Error - if err != nil { - return nil, err - } - - return space, nil - } - - return &space, nil + return nil, fmt.Errorf("feature disabled") } func (u *User) GetSpaceEntity(ctx context.Context) (*state.Space, error) { @@ -457,17 +415,7 @@ func (u *User) GetName() string { } func (u *User) SetName(ctx context.Context, name string) error { - u.Mutex.Lock() - u.Name = name - u.Mutex.Unlock() - - auth := u.GetAuth() - if auth == nil { - return nil - } - - auth.Nickname = name - return u.o.db.WithContext(ctx).Save(&auth).Error + return fmt.Errorf("feature disabled") } func (u *User) GetAuth() *state.User { @@ -497,46 +445,6 @@ func (u *User) AnnounceELO() { u.Message(result) } -func (u *User) HydrateELOState(ctx context.Context, authUser *state.User) error { - u.Mutex.Lock() - defer u.Mutex.Unlock() - - elo := NewELOState(u.o.Duels) - - for _, duel := range u.o.Duels { - state, err := LoadELOState(ctx, u.o.db, authUser, duel.Name) - - if err == nil { - elo.Ratings[duel.Name] = state - continue - } - - return err - } - - u.ELO = elo - - return nil -} - -func (u *User) SaveELOState(ctx context.Context) error { - if u.Auth == nil { - return nil - } - - u.Mutex.Lock() - defer u.Mutex.Unlock() - 
-	for matchType, state := range u.ELO.Ratings {
-		err := state.SaveState(ctx, u.o.db, u.Auth, matchType)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
 func (u *User) LogVisit(ctx context.Context) error {
 	visit := state.Visit{
 		SessionID: u.sessionLog.ID,
@@ -563,36 +471,12 @@ func (u *User) LogVisit(ctx context.Context) error {
 		}
 	}
 
-	err := u.o.db.WithContext(ctx).Save(&visit).Error
-	if err != nil {
-		return err
-	}
-	<-u.ServerSession.Ctx().Done()
-	visit.End = time.Now()
-	return u.o.db.WithContext(ctx).Save(&visit).Error
+	return nil
 }
 
 func (u *User) HandleAuthentication(ctx context.Context, auth *state.User) error {
-	if auth == nil {
-		return nil
-	}
-
-	u.Mutex.Lock()
-	u.Auth = auth
-	u.Mutex.Unlock()
-
-	err := u.HydrateELOState(ctx, auth)
-	if err != nil {
-		return err
-	}
-
-	err = u.SaveELOState(ctx)
-	if err != nil {
-		return err
-	}
-
 	return nil
 }
@@ -731,31 +615,21 @@ type UserOrchestrator struct {
 	Users   []*User
 	Servers map[*servers.GameServer][]*User
 	Mutex   deadlock.RWMutex
-
-	db    *gorm.DB
-	verse *verse.Verse
+	verse *verse.Verse
 }
 
-func NewUserOrchestrator(db *gorm.DB, verse *verse.Verse, duels []config.DuelType) *UserOrchestrator {
+func NewUserOrchestrator(duels []config.DuelType) *UserOrchestrator {
 	return &UserOrchestrator{
 		Duels:   duels,
 		Users:   make([]*User, 0),
 		Servers: make(map[*servers.GameServer][]*User),
-		db:      db,
-		verse:   verse,
 	}
 }
 
 func (u *UserOrchestrator) PollUser(ctx context.Context, user *User) {
 	<-user.Ctx().Done()
-	logger := user.Logger()
 	u.RemoveUser(user)
-	user.sessionLog.End = time.Now()
-	err := u.db.WithContext(ctx).Save(user.sessionLog).Error
-	if err != nil {
-		logger.Error().Err(err).Msg("failed to set session end")
-	}
 }
 
 func (u *UserOrchestrator) newSessionID() (ingress.ClientID, error) {
@@ -815,24 +689,13 @@ func (u *UserOrchestrator) AddUser(ctx context.Context, connection ingress.Conne
 		return nil, err
 	}
 
-	host, err := getAddress(ctx, u.db, utils.HashString(connection.Host()))
-	if err != nil {
-		return nil, err
-	}
-
 	sessionID := utils.HashString(fmt.Sprintf("%d-%s", id, connection.Host()))
 	sessionLog := state.Session{
-		HostID: host.ID,
 		UUID:   sessionID,
 		Device: connection.DeviceType(),
 	}
 	sessionLog.Start = time.Now()
-	err = u.db.WithContext(ctx).Save(&sessionLog).Error
-	if err != nil {
-		return nil, err
-	}
-
 	u.Mutex.Lock()
 	user := User{
 		Id: id,
diff --git a/services/go/svc/cluster/state/module.go b/cmd/server/state/module.go
similarity index 89%
rename from services/go/svc/cluster/state/module.go
rename to cmd/server/state/module.go
index 42db96f2e..32409d085 100644
--- a/services/go/svc/cluster/state/module.go
+++ b/cmd/server/state/module.go
@@ -1,7 +1,7 @@
 package state
 
 import (
-	"github.com/cfoust/sour/svc/cluster/config"
+	"github.com/cfoust/sour/cmd/server/config"
 
 	"github.com/go-redis/redis/v9"
 )
diff --git a/services/go/svc/cluster/state/sql.go b/cmd/server/state/sql.go
similarity index 100%
rename from services/go/svc/cluster/state/sql.go
rename to cmd/server/state/sql.go
diff --git a/cmd/server/static/.gitignore b/cmd/server/static/.gitignore
new file mode 100644
index 000000000..c9490a532
--- /dev/null
+++ b/cmd/server/static/.gitignore
@@ -0,0 +1 @@
+/site
diff --git a/cmd/server/static/module.go b/cmd/server/static/module.go
new file mode 100644
index 000000000..a53ea96f8
--- /dev/null
+++ b/cmd/server/static/module.go
@@ -0,0 +1,116 @@
+package static
+
+import (
+	"bytes"
+	"embed"
+	"fmt"
+	"io/fs"
+	"net/http"
+	"time"
+)
+
+//go:embed site
+var staticFiles embed.FS
+
+// injectedFile pretends to be an fs.File, but just returns a byte buffer.
+type injectedFile struct {
+	name   string
+	data   []byte
+	reader *bytes.Reader
+}
+
+var _ fs.File = (*injectedFile)(nil)
+
+type injectedFileInfo struct {
+	name string
+	size int64
+}
+
+var _ fs.FileInfo = (*injectedFileInfo)(nil)
+
+func (i *injectedFileInfo) Name() string {
+	return i.name
+}
+
+func (i *injectedFileInfo) Size() int64 {
+	return i.size
+}
+
+func (i *injectedFileInfo) Mode() fs.FileMode {
+	return 0444
+}
+
+func (i *injectedFileInfo) Sys() interface{} {
+	return nil
+}
+
+func (i *injectedFileInfo) ModTime() time.Time {
+	return time.Time{}
+}
+
+func (i *injectedFileInfo) IsDir() bool {
+	return false
+}
+
+func (i *injectedFile) Stat() (fs.FileInfo, error) {
+	return &injectedFileInfo{
+		name: i.name,
+		size: int64(len(i.data)),
+	}, nil
+}
+
+func (i *injectedFile) Read(p []byte) (n int, err error) {
+	return i.reader.Read(p)
+}
+
+func (i *injectedFile) Close() error {
+	return nil
+}
+
+// dynamicFS is an fs.FS that allows you to overwrite files with injected
+// data.
+type dynamicFS struct {
+	original fs.FS
+	injected map[string][]byte
+}
+
+var _ fs.FS = (*dynamicFS)(nil)
+
+func (d *dynamicFS) Open(name string) (fs.File, error) {
+	if data, ok := d.injected[name]; ok {
+		return &injectedFile{
+			name:   name,
+			data:   data,
+			reader: bytes.NewReader(data),
+		}, nil
+	}
+
+	return d.original.Open(name)
+}
+
+func Site(config string) (http.Handler, error) {
+	content, _ := fs.Sub(fs.FS(staticFiles), "site")
+
+	// Get index.js and inject config as JSON
+	index, _ := fs.ReadFile(content, "index.js")
+
+	prefix := fmt.Sprintf(
+		"const INJECTED_SOUR_CONFIG = %s;\n",
+		string(config),
+	)
+
+	index = append(
+		[]byte(prefix),
+		index...,
+	)
+
+	// Inject index.js
+	injected := map[string][]byte{
+		"index.js": index,
+	}
+
+	return http.FileServer(http.FS(&dynamicFS{
+		original: content,
+		injected: injected,
+	})), nil
+}
diff --git a/services/go/svc/cluster/stores/module.go b/cmd/server/stores/module.go
similarity index 95%
rename from services/go/svc/cluster/stores/module.go
rename to cmd/server/stores/module.go
index 508cbcf92..d7e0070cf 100644
--- a/services/go/svc/cluster/stores/module.go
+++ b/cmd/server/stores/module.go
@@ -6,10 +6,10 @@ import (
 	"os"
 	"time"
 
+	"github.com/cfoust/sour/cmd/server/config"
+	"github.com/cfoust/sour/cmd/server/state"
 	"github.com/cfoust/sour/pkg/assets"
 	"github.com/cfoust/sour/pkg/utils"
-	"github.com/cfoust/sour/svc/cluster/config"
-	"github.com/cfoust/sour/svc/cluster/state"
 
 	"gorm.io/gorm"
 )
@@ -30,7 +30,7 @@ func (s *AssetStorage) Get(ctx context.Context, asset *state.Asset) ([]byte, err
 	asset.Accessed = time.Now()
 	err := s.db.WithContext(ctx).Save(asset).Error
 	if err != nil {
-		return nil, err
+		return nil, err
 	}
 
 	return store.Get(ctx, asset.Hash)
diff --git a/services/go/svc/cluster/verse/editing.go b/cmd/server/verse/editing.go
similarity index 99%
rename from services/go/svc/cluster/verse/editing.go
rename to cmd/server/verse/editing.go
index 54f46e22d..42825c91c 100644
--- a/services/go/svc/cluster/verse/editing.go
+++ b/cmd/server/verse/editing.go
@@ -7,11 +7,11 @@ import (
 	"time"
 	"unsafe"
 
+	"github.com/cfoust/sour/cmd/server/ingress"
 	C "github.com/cfoust/sour/pkg/game/constants"
 	P "github.com/cfoust/sour/pkg/game/protocol"
 	"github.com/cfoust/sour/pkg/maps"
 	"github.com/cfoust/sour/pkg/maps/worldio"
-	"github.com/cfoust/sour/svc/cluster/ingress"
 
 	"github.com/rs/zerolog/log"
 )
diff --git a/services/go/svc/cluster/verse/module.go b/cmd/server/verse/module.go
similarity index 97%
rename
from services/go/svc/cluster/verse/module.go rename to cmd/server/verse/module.go index 16115428f..9e5f00113 100644 --- a/services/go/svc/cluster/verse/module.go +++ b/cmd/server/verse/module.go @@ -9,10 +9,10 @@ import ( "regexp" "time" + "github.com/cfoust/sour/cmd/server/state" + "github.com/cfoust/sour/cmd/server/stores" "github.com/cfoust/sour/pkg/maps" "github.com/cfoust/sour/pkg/utils" - "github.com/cfoust/sour/svc/cluster/state" - "github.com/cfoust/sour/svc/cluster/stores" "gorm.io/gorm" ) @@ -22,11 +22,8 @@ type Verse struct { db *gorm.DB } -func NewVerse(db *gorm.DB, store *stores.AssetStorage) *Verse { - return &Verse{ - db: db, - store: store, - } +func NewVerse() *Verse { + return &Verse{} } type entity struct { diff --git a/services/go/svc/cluster/verse/spaces.go b/cmd/server/verse/spaces.go similarity index 96% rename from services/go/svc/cluster/verse/spaces.go rename to cmd/server/verse/spaces.go index dc5eb7a96..2050aece8 100644 --- a/services/go/svc/cluster/verse/spaces.go +++ b/cmd/server/verse/spaces.go @@ -8,14 +8,14 @@ import ( "strings" "time" + "github.com/cfoust/sour/cmd/server/config" + "github.com/cfoust/sour/cmd/server/ingress" + gameServers "github.com/cfoust/sour/cmd/server/servers" "github.com/cfoust/sour/pkg/assets" "github.com/cfoust/sour/pkg/game/commands" C "github.com/cfoust/sour/pkg/game/constants" "github.com/cfoust/sour/pkg/server" "github.com/cfoust/sour/pkg/utils" - "github.com/cfoust/sour/svc/cluster/config" - "github.com/cfoust/sour/svc/cluster/ingress" - gameServers "github.com/cfoust/sour/svc/cluster/servers" "github.com/rs/zerolog" "github.com/rs/zerolog/log" @@ -151,10 +151,9 @@ type SpaceManager struct { maps *assets.AssetFetcher } -func NewSpaceManager(verse *Verse, servers *gameServers.ServerManager, maps *assets.AssetFetcher) *SpaceManager { +func NewSpaceManager(servers *gameServers.ServerManager, maps *assets.AssetFetcher) *SpaceManager { return &SpaceManager{ Session: utils.NewSession(context.Background()), - verse: verse, servers: servers, instances: make(map[string]*SpaceInstance), maps: maps, diff --git a/services/go/svc/cluster/watcher/module.go b/cmd/server/watcher/module.go similarity index 97% rename from services/go/svc/cluster/watcher/module.go rename to cmd/server/watcher/module.go index 0ab18fdad..fc53669ab 100644 --- a/services/go/svc/cluster/watcher/module.go +++ b/cmd/server/watcher/module.go @@ -13,10 +13,11 @@ import ( ) /* -#cgo LDFLAGS: -lenet +#cgo LDFLAGS: -L./../../../pkg/enet/enet -lenet +#cgo CFLAGS: -I./../../../pkg/enet/enet/include +#include #include #include -#include ENetAddress resolveServer(const char *host, int port) { ENetAddress serverAddress = { ENET_HOST_ANY, ENET_PORT_ANY }; @@ -59,6 +60,8 @@ int receiveServer(ENetSocket socket, ENetAddress address, void * output) { int len = enet_socket_receive(socket, &address, &buf, 1); return len; } + + return 0; } void destroySocket(ENetSocket sock) { @@ -91,8 +94,8 @@ type Server struct { type Servers map[Address]Server type Watcher struct { - serverMutex sync.Mutex - servers Servers + serverMutex sync.Mutex + servers Servers } func NewWatcher() *Watcher { diff --git a/services/go/cmd/sourdump/main.go b/cmd/sourdump/main.go similarity index 98% rename from services/go/cmd/sourdump/main.go rename to cmd/sourdump/main.go index eb4af541e..9e6b47a83 100644 --- a/services/go/cmd/sourdump/main.go +++ b/cmd/sourdump/main.go @@ -367,8 +367,10 @@ func Download(ctx context.Context, cache assets.Store, roots []assets.Root, outD outCache := assets.FSStore(outDir) for _, 
target := range targets { + found := false + for _, root := range roots { - remoteRoot, ok := root.(*assets.RemoteRoot) + remoteRoot, ok := root.(*assets.PackagedRoot) if !ok { continue } @@ -385,13 +387,19 @@ func Download(ctx context.Context, cache assets.Store, roots []assets.Root, outD if err != nil { log.Fatal().Err(err).Msgf("could not save asset %s", target) } + + found = true + } + + if !found { + log.Fatal().Msgf("could not find asset '%s'", target) } } } func List(cache assets.Store, roots []assets.Root) { for _, root := range roots { - remoteRoot, ok := root.(*assets.RemoteRoot) + remoteRoot, ok := root.(*assets.PackagedRoot) if !ok { continue } @@ -482,6 +490,8 @@ func main() { log.Fatal().Msg("You must provide at least one argument.") } + fmt.Printf("roots: %+v", roots) + cache := assets.FSStore(*cacheDir) ctx := context.Background() assetRoots, err := assets.LoadRoots(ctx, cache, roots, false) diff --git a/entrypoint b/entrypoint deleted file mode 100755 index 6c5b93765..000000000 --- a/entrypoint +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env bash - -set -e - -CONFIG_DIR=${CONFIG_DIR:-/sour/config} - -mkdir -p "$CONFIG_DIR" - -# Validate the user's configuration and export the combined config -export SOUR_CONFIG=$(cue export /sour/schema.cue /sour/config.yaml $(find $CONFIG_DIR -name '*.json' -or -name '*.yaml' -or -name '*.cue')) - -if [ -z "$SOUR_CONFIG" ]; then - echo "A Sour config must be defined." -fi - -CLIENT_CONFIG=$(echo "$SOUR_CONFIG" | jq '.client') -# Inject the runtime configuration into the frontend's code -# When a container is restarted, the previous FS state still exists, so detect this -if ! grep -q "const INJECTED_SOUR_CONFIG" /app/index.js; then - echo "const INJECTED_SOUR_CONFIG = $CLIENT_CONFIG;" >> /tmp/out.js - cat /app/index.js >> /tmp/out.js - cp /tmp/out.js /app/index.js -fi - -wsproxy 28785 & -nginx -g 'daemon off;' & -(mkdir -p /redis && cd /redis && redis-server /sour/redis.conf) & -sleep 1 -cluster $CLUSTER_ARGS & - -tail -F /bin/entrypoint diff --git a/examples/docker-compose.yml b/examples/docker-compose.yml deleted file mode 100644 index 1c1eb0d16..000000000 --- a/examples/docker-compose.yml +++ /dev/null @@ -1,24 +0,0 @@ -version: '3' - -services: - sour-site: - image: registry.digitalocean.com/cfoust/sour:latest - container_name: sour-site - environment: - VIRTUAL_HOST: sourga.me - VIRTUAL_HOST_ALIAS: www.sourga.me - VIRTUAL_PORT: 1234 - LETSENCRYPT_HOST: sourga.me - LETSENCRYPT_EMAIL: XXXXXXX - ASSET_SOURCE: https://d2pz4qr8rxo7le.cloudfront.net/2bfc017.index.json - GAME_SERVER: wss://server.sourga.me - stdin_open: True - tty: True - networks: - - letsencrypt - -networks: - letsencrypt: - external: true - -# vim: expandtab tabstop=2 softtabstop=2 shiftwidth=2 diff --git a/services/game/.agignore b/game/.agignore similarity index 100% rename from services/game/.agignore rename to game/.agignore diff --git a/services/game/.gitignore b/game/.gitignore similarity index 100% rename from services/game/.gitignore rename to game/.gitignore diff --git a/services/game/README.markdown b/game/README.markdown similarity index 100% rename from services/game/README.markdown rename to game/README.markdown diff --git a/services/game/arbitrary_ws.patch b/game/arbitrary_ws.patch similarity index 100% rename from services/game/arbitrary_ws.patch rename to game/arbitrary_ws.patch diff --git a/services/game/build b/game/build similarity index 100% rename from services/game/build rename to game/build diff --git a/services/game/file_create.patch 
b/game/file_create.patch similarity index 100% rename from services/game/file_create.patch rename to game/file_create.patch diff --git a/services/game/game/gl-matrix.js b/game/game/gl-matrix.js similarity index 100% rename from services/game/game/gl-matrix.js rename to game/game/gl-matrix.js diff --git a/services/game/js/api.js b/game/js/api.js similarity index 100% rename from services/game/js/api.js rename to game/js/api.js diff --git a/services/game/js/main.js b/game/js/main.js similarity index 100% rename from services/game/js/main.js rename to game/js/main.js diff --git a/services/game/resolve_wasm.patch b/game/resolve_wasm.patch similarity index 100% rename from services/game/resolve_wasm.patch rename to game/resolve_wasm.patch diff --git a/services/game/src/Makefile b/game/src/Makefile similarity index 100% rename from services/game/src/Makefile rename to game/src/Makefile diff --git a/services/game/src/enet/.gitignore b/game/src/enet/.gitignore similarity index 100% rename from services/game/src/enet/.gitignore rename to game/src/enet/.gitignore diff --git a/services/game/src/enet/ChangeLog b/game/src/enet/ChangeLog similarity index 100% rename from services/game/src/enet/ChangeLog rename to game/src/enet/ChangeLog diff --git a/services/game/src/enet/LICENSE b/game/src/enet/LICENSE similarity index 100% rename from services/game/src/enet/LICENSE rename to game/src/enet/LICENSE diff --git a/services/game/src/enet/Makefile b/game/src/enet/Makefile similarity index 100% rename from services/game/src/enet/Makefile rename to game/src/enet/Makefile diff --git a/services/game/src/enet/README b/game/src/enet/README similarity index 100% rename from services/game/src/enet/README rename to game/src/enet/README diff --git a/services/game/src/enet/autom4te.cache/output.0 b/game/src/enet/autom4te.cache/output.0 similarity index 100% rename from services/game/src/enet/autom4te.cache/output.0 rename to game/src/enet/autom4te.cache/output.0 diff --git a/services/game/src/enet/autom4te.cache/requests b/game/src/enet/autom4te.cache/requests similarity index 100% rename from services/game/src/enet/autom4te.cache/requests rename to game/src/enet/autom4te.cache/requests diff --git a/services/game/src/enet/autom4te.cache/traces.0 b/game/src/enet/autom4te.cache/traces.0 similarity index 100% rename from services/game/src/enet/autom4te.cache/traces.0 rename to game/src/enet/autom4te.cache/traces.0 diff --git a/services/game/src/enet/callbacks.c b/game/src/enet/callbacks.c similarity index 100% rename from services/game/src/enet/callbacks.c rename to game/src/enet/callbacks.c diff --git a/services/game/src/enet/check_cflags.sh b/game/src/enet/check_cflags.sh similarity index 100% rename from services/game/src/enet/check_cflags.sh rename to game/src/enet/check_cflags.sh diff --git a/services/game/src/enet/compress.c b/game/src/enet/compress.c similarity index 100% rename from services/game/src/enet/compress.c rename to game/src/enet/compress.c diff --git a/services/game/src/enet/host.c b/game/src/enet/host.c similarity index 100% rename from services/game/src/enet/host.c rename to game/src/enet/host.c diff --git a/services/game/src/enet/include/enet/callbacks.h b/game/src/enet/include/enet/callbacks.h similarity index 100% rename from services/game/src/enet/include/enet/callbacks.h rename to game/src/enet/include/enet/callbacks.h diff --git a/services/game/src/enet/include/enet/enet.h b/game/src/enet/include/enet/enet.h similarity index 100% rename from services/game/src/enet/include/enet/enet.h rename to 
game/src/enet/include/enet/enet.h diff --git a/services/game/src/enet/include/enet/list.h b/game/src/enet/include/enet/list.h similarity index 100% rename from services/game/src/enet/include/enet/list.h rename to game/src/enet/include/enet/list.h diff --git a/services/game/src/enet/include/enet/protocol.h b/game/src/enet/include/enet/protocol.h similarity index 100% rename from services/game/src/enet/include/enet/protocol.h rename to game/src/enet/include/enet/protocol.h diff --git a/services/game/src/enet/include/enet/time.h b/game/src/enet/include/enet/time.h similarity index 100% rename from services/game/src/enet/include/enet/time.h rename to game/src/enet/include/enet/time.h diff --git a/services/game/src/enet/include/enet/types.h b/game/src/enet/include/enet/types.h similarity index 100% rename from services/game/src/enet/include/enet/types.h rename to game/src/enet/include/enet/types.h diff --git a/services/game/src/enet/include/enet/unix.h b/game/src/enet/include/enet/unix.h similarity index 100% rename from services/game/src/enet/include/enet/unix.h rename to game/src/enet/include/enet/unix.h diff --git a/services/game/src/enet/include/enet/utility.h b/game/src/enet/include/enet/utility.h similarity index 100% rename from services/game/src/enet/include/enet/utility.h rename to game/src/enet/include/enet/utility.h diff --git a/services/game/src/enet/include/enet/win32.h b/game/src/enet/include/enet/win32.h similarity index 100% rename from services/game/src/enet/include/enet/win32.h rename to game/src/enet/include/enet/win32.h diff --git a/services/game/src/enet/list.c b/game/src/enet/list.c similarity index 100% rename from services/game/src/enet/list.c rename to game/src/enet/list.c diff --git a/services/game/src/enet/packet.c b/game/src/enet/packet.c similarity index 100% rename from services/game/src/enet/packet.c rename to game/src/enet/packet.c diff --git a/services/game/src/enet/peer.c b/game/src/enet/peer.c similarity index 100% rename from services/game/src/enet/peer.c rename to game/src/enet/peer.c diff --git a/services/game/src/enet/protocol.c b/game/src/enet/protocol.c similarity index 100% rename from services/game/src/enet/protocol.c rename to game/src/enet/protocol.c diff --git a/services/game/src/enet/unix.c b/game/src/enet/unix.c similarity index 100% rename from services/game/src/enet/unix.c rename to game/src/enet/unix.c diff --git a/services/game/src/enet/win32.c b/game/src/enet/win32.c similarity index 100% rename from services/game/src/enet/win32.c rename to game/src/enet/win32.c diff --git a/services/game/src/engine/3dgui.cpp b/game/src/engine/3dgui.cpp similarity index 100% rename from services/game/src/engine/3dgui.cpp rename to game/src/engine/3dgui.cpp diff --git a/services/game/src/engine/animmodel.h b/game/src/engine/animmodel.h similarity index 100% rename from services/game/src/engine/animmodel.h rename to game/src/engine/animmodel.h diff --git a/services/game/src/engine/bih.cpp b/game/src/engine/bih.cpp similarity index 100% rename from services/game/src/engine/bih.cpp rename to game/src/engine/bih.cpp diff --git a/services/game/src/engine/bih.h b/game/src/engine/bih.h similarity index 100% rename from services/game/src/engine/bih.h rename to game/src/engine/bih.h diff --git a/services/game/src/engine/blend.cpp b/game/src/engine/blend.cpp similarity index 100% rename from services/game/src/engine/blend.cpp rename to game/src/engine/blend.cpp diff --git a/services/game/src/engine/blob.cpp b/game/src/engine/blob.cpp similarity index 100% rename from 
services/game/src/engine/blob.cpp rename to game/src/engine/blob.cpp diff --git a/services/game/src/engine/client.cpp b/game/src/engine/client.cpp similarity index 100% rename from services/game/src/engine/client.cpp rename to game/src/engine/client.cpp diff --git a/services/game/src/engine/command.cpp b/game/src/engine/command.cpp similarity index 100% rename from services/game/src/engine/command.cpp rename to game/src/engine/command.cpp diff --git a/services/game/src/engine/console.cpp b/game/src/engine/console.cpp similarity index 100% rename from services/game/src/engine/console.cpp rename to game/src/engine/console.cpp diff --git a/services/game/src/engine/cubeloader.cpp b/game/src/engine/cubeloader.cpp similarity index 100% rename from services/game/src/engine/cubeloader.cpp rename to game/src/engine/cubeloader.cpp diff --git a/services/game/src/engine/decal.cpp b/game/src/engine/decal.cpp similarity index 100% rename from services/game/src/engine/decal.cpp rename to game/src/engine/decal.cpp diff --git a/services/game/src/engine/depthfx.h b/game/src/engine/depthfx.h similarity index 100% rename from services/game/src/engine/depthfx.h rename to game/src/engine/depthfx.h diff --git a/services/game/src/engine/dynlight.cpp b/game/src/engine/dynlight.cpp similarity index 100% rename from services/game/src/engine/dynlight.cpp rename to game/src/engine/dynlight.cpp diff --git a/services/game/src/engine/engine.h b/game/src/engine/engine.h similarity index 100% rename from services/game/src/engine/engine.h rename to game/src/engine/engine.h diff --git a/services/game/src/engine/explosion.h b/game/src/engine/explosion.h similarity index 100% rename from services/game/src/engine/explosion.h rename to game/src/engine/explosion.h diff --git a/services/game/src/engine/glare.cpp b/game/src/engine/glare.cpp similarity index 100% rename from services/game/src/engine/glare.cpp rename to game/src/engine/glare.cpp diff --git a/services/game/src/engine/grass.cpp b/game/src/engine/grass.cpp similarity index 100% rename from services/game/src/engine/grass.cpp rename to game/src/engine/grass.cpp diff --git a/services/game/src/engine/iqm.h b/game/src/engine/iqm.h similarity index 100% rename from services/game/src/engine/iqm.h rename to game/src/engine/iqm.h diff --git a/services/game/src/engine/lensflare.h b/game/src/engine/lensflare.h similarity index 100% rename from services/game/src/engine/lensflare.h rename to game/src/engine/lensflare.h diff --git a/services/game/src/engine/lightmap.cpp b/game/src/engine/lightmap.cpp similarity index 100% rename from services/game/src/engine/lightmap.cpp rename to game/src/engine/lightmap.cpp diff --git a/services/game/src/engine/lightmap.h b/game/src/engine/lightmap.h similarity index 100% rename from services/game/src/engine/lightmap.h rename to game/src/engine/lightmap.h diff --git a/services/game/src/engine/lightning.h b/game/src/engine/lightning.h similarity index 100% rename from services/game/src/engine/lightning.h rename to game/src/engine/lightning.h diff --git a/services/game/src/engine/main.cpp b/game/src/engine/main.cpp similarity index 100% rename from services/game/src/engine/main.cpp rename to game/src/engine/main.cpp diff --git a/services/game/src/engine/master.cpp b/game/src/engine/master.cpp similarity index 100% rename from services/game/src/engine/master.cpp rename to game/src/engine/master.cpp diff --git a/services/game/src/engine/material.cpp b/game/src/engine/material.cpp similarity index 100% rename from services/game/src/engine/material.cpp 
rename to game/src/engine/material.cpp diff --git a/services/game/src/engine/md2.h b/game/src/engine/md2.h similarity index 100% rename from services/game/src/engine/md2.h rename to game/src/engine/md2.h diff --git a/services/game/src/engine/md3.h b/game/src/engine/md3.h similarity index 100% rename from services/game/src/engine/md3.h rename to game/src/engine/md3.h diff --git a/services/game/src/engine/md5.h b/game/src/engine/md5.h similarity index 100% rename from services/game/src/engine/md5.h rename to game/src/engine/md5.h diff --git a/services/game/src/engine/menus.cpp b/game/src/engine/menus.cpp similarity index 100% rename from services/game/src/engine/menus.cpp rename to game/src/engine/menus.cpp diff --git a/services/game/src/engine/model.h b/game/src/engine/model.h similarity index 100% rename from services/game/src/engine/model.h rename to game/src/engine/model.h diff --git a/services/game/src/engine/movie.cpp b/game/src/engine/movie.cpp similarity index 100% rename from services/game/src/engine/movie.cpp rename to game/src/engine/movie.cpp diff --git a/services/game/src/engine/mpr.h b/game/src/engine/mpr.h similarity index 100% rename from services/game/src/engine/mpr.h rename to game/src/engine/mpr.h diff --git a/services/game/src/engine/normal.cpp b/game/src/engine/normal.cpp similarity index 100% rename from services/game/src/engine/normal.cpp rename to game/src/engine/normal.cpp diff --git a/services/game/src/engine/obj.h b/game/src/engine/obj.h similarity index 100% rename from services/game/src/engine/obj.h rename to game/src/engine/obj.h diff --git a/services/game/src/engine/octa.cpp b/game/src/engine/octa.cpp similarity index 100% rename from services/game/src/engine/octa.cpp rename to game/src/engine/octa.cpp diff --git a/services/game/src/engine/octa.h b/game/src/engine/octa.h similarity index 100% rename from services/game/src/engine/octa.h rename to game/src/engine/octa.h diff --git a/services/game/src/engine/octaedit.cpp b/game/src/engine/octaedit.cpp similarity index 100% rename from services/game/src/engine/octaedit.cpp rename to game/src/engine/octaedit.cpp diff --git a/services/game/src/engine/octarender.cpp b/game/src/engine/octarender.cpp similarity index 100% rename from services/game/src/engine/octarender.cpp rename to game/src/engine/octarender.cpp diff --git a/services/game/src/engine/pch.cpp b/game/src/engine/pch.cpp similarity index 100% rename from services/game/src/engine/pch.cpp rename to game/src/engine/pch.cpp diff --git a/services/game/src/engine/physics.cpp b/game/src/engine/physics.cpp similarity index 100% rename from services/game/src/engine/physics.cpp rename to game/src/engine/physics.cpp diff --git a/services/game/src/engine/pvs.cpp b/game/src/engine/pvs.cpp similarity index 100% rename from services/game/src/engine/pvs.cpp rename to game/src/engine/pvs.cpp diff --git a/services/game/src/engine/ragdoll.h b/game/src/engine/ragdoll.h similarity index 100% rename from services/game/src/engine/ragdoll.h rename to game/src/engine/ragdoll.h diff --git a/services/game/src/engine/rendergl.cpp b/game/src/engine/rendergl.cpp similarity index 100% rename from services/game/src/engine/rendergl.cpp rename to game/src/engine/rendergl.cpp diff --git a/services/game/src/engine/rendermodel.cpp b/game/src/engine/rendermodel.cpp similarity index 100% rename from services/game/src/engine/rendermodel.cpp rename to game/src/engine/rendermodel.cpp diff --git a/services/game/src/engine/renderparticles.cpp b/game/src/engine/renderparticles.cpp similarity index 
100% rename from services/game/src/engine/renderparticles.cpp rename to game/src/engine/renderparticles.cpp diff --git a/services/game/src/engine/rendersky.cpp b/game/src/engine/rendersky.cpp similarity index 100% rename from services/game/src/engine/rendersky.cpp rename to game/src/engine/rendersky.cpp diff --git a/services/game/src/engine/rendertarget.h b/game/src/engine/rendertarget.h similarity index 100% rename from services/game/src/engine/rendertarget.h rename to game/src/engine/rendertarget.h diff --git a/services/game/src/engine/rendertext.cpp b/game/src/engine/rendertext.cpp similarity index 100% rename from services/game/src/engine/rendertext.cpp rename to game/src/engine/rendertext.cpp diff --git a/services/game/src/engine/renderva.cpp b/game/src/engine/renderva.cpp similarity index 100% rename from services/game/src/engine/renderva.cpp rename to game/src/engine/renderva.cpp diff --git a/services/game/src/engine/server.cpp b/game/src/engine/server.cpp similarity index 100% rename from services/game/src/engine/server.cpp rename to game/src/engine/server.cpp diff --git a/services/game/src/engine/serverbrowser.cpp b/game/src/engine/serverbrowser.cpp similarity index 100% rename from services/game/src/engine/serverbrowser.cpp rename to game/src/engine/serverbrowser.cpp diff --git a/services/game/src/engine/shader.cpp b/game/src/engine/shader.cpp similarity index 100% rename from services/game/src/engine/shader.cpp rename to game/src/engine/shader.cpp diff --git a/services/game/src/engine/shadowmap.cpp b/game/src/engine/shadowmap.cpp similarity index 100% rename from services/game/src/engine/shadowmap.cpp rename to game/src/engine/shadowmap.cpp diff --git a/services/game/src/engine/skelmodel.h b/game/src/engine/skelmodel.h similarity index 100% rename from services/game/src/engine/skelmodel.h rename to game/src/engine/skelmodel.h diff --git a/services/game/src/engine/smd.h b/game/src/engine/smd.h similarity index 100% rename from services/game/src/engine/smd.h rename to game/src/engine/smd.h diff --git a/services/game/src/engine/sound.cpp b/game/src/engine/sound.cpp similarity index 100% rename from services/game/src/engine/sound.cpp rename to game/src/engine/sound.cpp diff --git a/services/game/src/engine/textedit.h b/game/src/engine/textedit.h similarity index 100% rename from services/game/src/engine/textedit.h rename to game/src/engine/textedit.h diff --git a/services/game/src/engine/texture.cpp b/game/src/engine/texture.cpp similarity index 100% rename from services/game/src/engine/texture.cpp rename to game/src/engine/texture.cpp diff --git a/services/game/src/engine/texture.h b/game/src/engine/texture.h similarity index 100% rename from services/game/src/engine/texture.h rename to game/src/engine/texture.h diff --git a/services/game/src/engine/vertmodel.h b/game/src/engine/vertmodel.h similarity index 100% rename from services/game/src/engine/vertmodel.h rename to game/src/engine/vertmodel.h diff --git a/services/game/src/engine/water.cpp b/game/src/engine/water.cpp similarity index 100% rename from services/game/src/engine/water.cpp rename to game/src/engine/water.cpp diff --git a/services/game/src/engine/world.cpp b/game/src/engine/world.cpp similarity index 100% rename from services/game/src/engine/world.cpp rename to game/src/engine/world.cpp diff --git a/services/game/src/engine/world.h b/game/src/engine/world.h similarity index 100% rename from services/game/src/engine/world.h rename to game/src/engine/world.h diff --git a/services/game/src/engine/worldio.cpp 
b/game/src/engine/worldio.cpp similarity index 100% rename from services/game/src/engine/worldio.cpp rename to game/src/engine/worldio.cpp diff --git a/services/game/src/fpsgame/ai.cpp b/game/src/fpsgame/ai.cpp similarity index 100% rename from services/game/src/fpsgame/ai.cpp rename to game/src/fpsgame/ai.cpp diff --git a/services/game/src/fpsgame/ai.h b/game/src/fpsgame/ai.h similarity index 100% rename from services/game/src/fpsgame/ai.h rename to game/src/fpsgame/ai.h diff --git a/services/game/src/fpsgame/aiman.h b/game/src/fpsgame/aiman.h similarity index 100% rename from services/game/src/fpsgame/aiman.h rename to game/src/fpsgame/aiman.h diff --git a/services/game/src/fpsgame/capture.h b/game/src/fpsgame/capture.h similarity index 100% rename from services/game/src/fpsgame/capture.h rename to game/src/fpsgame/capture.h diff --git a/services/game/src/fpsgame/client.cpp b/game/src/fpsgame/client.cpp similarity index 100% rename from services/game/src/fpsgame/client.cpp rename to game/src/fpsgame/client.cpp diff --git a/services/game/src/fpsgame/collect.h b/game/src/fpsgame/collect.h similarity index 100% rename from services/game/src/fpsgame/collect.h rename to game/src/fpsgame/collect.h diff --git a/services/game/src/fpsgame/ctf.h b/game/src/fpsgame/ctf.h similarity index 100% rename from services/game/src/fpsgame/ctf.h rename to game/src/fpsgame/ctf.h diff --git a/services/game/src/fpsgame/entities.cpp b/game/src/fpsgame/entities.cpp similarity index 100% rename from services/game/src/fpsgame/entities.cpp rename to game/src/fpsgame/entities.cpp diff --git a/services/game/src/fpsgame/extinfo.h b/game/src/fpsgame/extinfo.h similarity index 100% rename from services/game/src/fpsgame/extinfo.h rename to game/src/fpsgame/extinfo.h diff --git a/services/game/src/fpsgame/fps.cpp b/game/src/fpsgame/fps.cpp similarity index 100% rename from services/game/src/fpsgame/fps.cpp rename to game/src/fpsgame/fps.cpp diff --git a/services/game/src/fpsgame/game.h b/game/src/fpsgame/game.h similarity index 100% rename from services/game/src/fpsgame/game.h rename to game/src/fpsgame/game.h diff --git a/services/game/src/fpsgame/monster.cpp b/game/src/fpsgame/monster.cpp similarity index 100% rename from services/game/src/fpsgame/monster.cpp rename to game/src/fpsgame/monster.cpp diff --git a/services/game/src/fpsgame/movable.cpp b/game/src/fpsgame/movable.cpp similarity index 100% rename from services/game/src/fpsgame/movable.cpp rename to game/src/fpsgame/movable.cpp diff --git a/services/game/src/fpsgame/pch.cpp b/game/src/fpsgame/pch.cpp similarity index 100% rename from services/game/src/fpsgame/pch.cpp rename to game/src/fpsgame/pch.cpp diff --git a/services/game/src/fpsgame/render.cpp b/game/src/fpsgame/render.cpp similarity index 100% rename from services/game/src/fpsgame/render.cpp rename to game/src/fpsgame/render.cpp diff --git a/services/game/src/fpsgame/scoreboard.cpp b/game/src/fpsgame/scoreboard.cpp similarity index 100% rename from services/game/src/fpsgame/scoreboard.cpp rename to game/src/fpsgame/scoreboard.cpp diff --git a/services/game/src/fpsgame/server.cpp b/game/src/fpsgame/server.cpp similarity index 100% rename from services/game/src/fpsgame/server.cpp rename to game/src/fpsgame/server.cpp diff --git a/services/game/src/fpsgame/waypoint.cpp b/game/src/fpsgame/waypoint.cpp similarity index 100% rename from services/game/src/fpsgame/waypoint.cpp rename to game/src/fpsgame/waypoint.cpp diff --git a/services/game/src/fpsgame/weapon.cpp b/game/src/fpsgame/weapon.cpp similarity index 
100% rename from services/game/src/fpsgame/weapon.cpp rename to game/src/fpsgame/weapon.cpp diff --git a/services/game/src/include/SDL.h b/game/src/include/SDL.h similarity index 100% rename from services/game/src/include/SDL.h rename to game/src/include/SDL.h diff --git a/services/game/src/include/SDL_assert.h b/game/src/include/SDL_assert.h similarity index 100% rename from services/game/src/include/SDL_assert.h rename to game/src/include/SDL_assert.h diff --git a/services/game/src/include/SDL_atomic.h b/game/src/include/SDL_atomic.h similarity index 100% rename from services/game/src/include/SDL_atomic.h rename to game/src/include/SDL_atomic.h diff --git a/services/game/src/include/SDL_audio.h b/game/src/include/SDL_audio.h similarity index 100% rename from services/game/src/include/SDL_audio.h rename to game/src/include/SDL_audio.h diff --git a/services/game/src/include/SDL_bits.h b/game/src/include/SDL_bits.h similarity index 100% rename from services/game/src/include/SDL_bits.h rename to game/src/include/SDL_bits.h diff --git a/services/game/src/include/SDL_blendmode.h b/game/src/include/SDL_blendmode.h similarity index 100% rename from services/game/src/include/SDL_blendmode.h rename to game/src/include/SDL_blendmode.h diff --git a/services/game/src/include/SDL_clipboard.h b/game/src/include/SDL_clipboard.h similarity index 100% rename from services/game/src/include/SDL_clipboard.h rename to game/src/include/SDL_clipboard.h diff --git a/services/game/src/include/SDL_config.h b/game/src/include/SDL_config.h similarity index 100% rename from services/game/src/include/SDL_config.h rename to game/src/include/SDL_config.h diff --git a/services/game/src/include/SDL_config.h.cmake b/game/src/include/SDL_config.h.cmake similarity index 100% rename from services/game/src/include/SDL_config.h.cmake rename to game/src/include/SDL_config.h.cmake diff --git a/services/game/src/include/SDL_config.h.in b/game/src/include/SDL_config.h.in similarity index 100% rename from services/game/src/include/SDL_config.h.in rename to game/src/include/SDL_config.h.in diff --git a/services/game/src/include/SDL_config_android.h b/game/src/include/SDL_config_android.h similarity index 100% rename from services/game/src/include/SDL_config_android.h rename to game/src/include/SDL_config_android.h diff --git a/services/game/src/include/SDL_config_iphoneos.h b/game/src/include/SDL_config_iphoneos.h similarity index 100% rename from services/game/src/include/SDL_config_iphoneos.h rename to game/src/include/SDL_config_iphoneos.h diff --git a/services/game/src/include/SDL_config_macosx.h b/game/src/include/SDL_config_macosx.h similarity index 100% rename from services/game/src/include/SDL_config_macosx.h rename to game/src/include/SDL_config_macosx.h diff --git a/services/game/src/include/SDL_config_minimal.h b/game/src/include/SDL_config_minimal.h similarity index 100% rename from services/game/src/include/SDL_config_minimal.h rename to game/src/include/SDL_config_minimal.h diff --git a/services/game/src/include/SDL_config_os2.h b/game/src/include/SDL_config_os2.h similarity index 100% rename from services/game/src/include/SDL_config_os2.h rename to game/src/include/SDL_config_os2.h diff --git a/services/game/src/include/SDL_config_pandora.h b/game/src/include/SDL_config_pandora.h similarity index 100% rename from services/game/src/include/SDL_config_pandora.h rename to game/src/include/SDL_config_pandora.h diff --git a/services/game/src/include/SDL_config_psp.h b/game/src/include/SDL_config_psp.h similarity index 100% 
rename from services/game/src/include/SDL_config_psp.h rename to game/src/include/SDL_config_psp.h diff --git a/services/game/src/include/SDL_config_win32.h b/game/src/include/SDL_config_win32.h similarity index 100% rename from services/game/src/include/SDL_config_win32.h rename to game/src/include/SDL_config_win32.h diff --git a/services/game/src/include/SDL_config_windows.h b/game/src/include/SDL_config_windows.h similarity index 100% rename from services/game/src/include/SDL_config_windows.h rename to game/src/include/SDL_config_windows.h diff --git a/services/game/src/include/SDL_config_winrt.h b/game/src/include/SDL_config_winrt.h similarity index 100% rename from services/game/src/include/SDL_config_winrt.h rename to game/src/include/SDL_config_winrt.h diff --git a/services/game/src/include/SDL_config_wiz.h b/game/src/include/SDL_config_wiz.h similarity index 100% rename from services/game/src/include/SDL_config_wiz.h rename to game/src/include/SDL_config_wiz.h diff --git a/services/game/src/include/SDL_copying.h b/game/src/include/SDL_copying.h similarity index 100% rename from services/game/src/include/SDL_copying.h rename to game/src/include/SDL_copying.h diff --git a/services/game/src/include/SDL_cpuinfo.h b/game/src/include/SDL_cpuinfo.h similarity index 100% rename from services/game/src/include/SDL_cpuinfo.h rename to game/src/include/SDL_cpuinfo.h diff --git a/services/game/src/include/SDL_egl.h b/game/src/include/SDL_egl.h similarity index 100% rename from services/game/src/include/SDL_egl.h rename to game/src/include/SDL_egl.h diff --git a/services/game/src/include/SDL_endian.h b/game/src/include/SDL_endian.h similarity index 100% rename from services/game/src/include/SDL_endian.h rename to game/src/include/SDL_endian.h diff --git a/services/game/src/include/SDL_error.h b/game/src/include/SDL_error.h similarity index 100% rename from services/game/src/include/SDL_error.h rename to game/src/include/SDL_error.h diff --git a/services/game/src/include/SDL_events.h b/game/src/include/SDL_events.h similarity index 100% rename from services/game/src/include/SDL_events.h rename to game/src/include/SDL_events.h diff --git a/services/game/src/include/SDL_filesystem.h b/game/src/include/SDL_filesystem.h similarity index 100% rename from services/game/src/include/SDL_filesystem.h rename to game/src/include/SDL_filesystem.h diff --git a/services/game/src/include/SDL_gamecontroller.h b/game/src/include/SDL_gamecontroller.h similarity index 100% rename from services/game/src/include/SDL_gamecontroller.h rename to game/src/include/SDL_gamecontroller.h diff --git a/services/game/src/include/SDL_gesture.h b/game/src/include/SDL_gesture.h similarity index 100% rename from services/game/src/include/SDL_gesture.h rename to game/src/include/SDL_gesture.h diff --git a/services/game/src/include/SDL_haptic.h b/game/src/include/SDL_haptic.h similarity index 100% rename from services/game/src/include/SDL_haptic.h rename to game/src/include/SDL_haptic.h diff --git a/services/game/src/include/SDL_hints.h b/game/src/include/SDL_hints.h similarity index 100% rename from services/game/src/include/SDL_hints.h rename to game/src/include/SDL_hints.h diff --git a/services/game/src/include/SDL_image.h b/game/src/include/SDL_image.h similarity index 100% rename from services/game/src/include/SDL_image.h rename to game/src/include/SDL_image.h diff --git a/services/game/src/include/SDL_joystick.h b/game/src/include/SDL_joystick.h similarity index 100% rename from services/game/src/include/SDL_joystick.h rename 
to game/src/include/SDL_joystick.h diff --git a/services/game/src/include/SDL_keyboard.h b/game/src/include/SDL_keyboard.h similarity index 100% rename from services/game/src/include/SDL_keyboard.h rename to game/src/include/SDL_keyboard.h diff --git a/services/game/src/include/SDL_keycode.h b/game/src/include/SDL_keycode.h similarity index 100% rename from services/game/src/include/SDL_keycode.h rename to game/src/include/SDL_keycode.h diff --git a/services/game/src/include/SDL_loadso.h b/game/src/include/SDL_loadso.h similarity index 100% rename from services/game/src/include/SDL_loadso.h rename to game/src/include/SDL_loadso.h diff --git a/services/game/src/include/SDL_log.h b/game/src/include/SDL_log.h similarity index 100% rename from services/game/src/include/SDL_log.h rename to game/src/include/SDL_log.h diff --git a/services/game/src/include/SDL_main.h b/game/src/include/SDL_main.h similarity index 100% rename from services/game/src/include/SDL_main.h rename to game/src/include/SDL_main.h diff --git a/services/game/src/include/SDL_messagebox.h b/game/src/include/SDL_messagebox.h similarity index 100% rename from services/game/src/include/SDL_messagebox.h rename to game/src/include/SDL_messagebox.h diff --git a/services/game/src/include/SDL_metal.h b/game/src/include/SDL_metal.h similarity index 100% rename from services/game/src/include/SDL_metal.h rename to game/src/include/SDL_metal.h diff --git a/services/game/src/include/SDL_mixer.h b/game/src/include/SDL_mixer.h similarity index 100% rename from services/game/src/include/SDL_mixer.h rename to game/src/include/SDL_mixer.h diff --git a/services/game/src/include/SDL_mouse.h b/game/src/include/SDL_mouse.h similarity index 100% rename from services/game/src/include/SDL_mouse.h rename to game/src/include/SDL_mouse.h diff --git a/services/game/src/include/SDL_mutex.h b/game/src/include/SDL_mutex.h similarity index 100% rename from services/game/src/include/SDL_mutex.h rename to game/src/include/SDL_mutex.h diff --git a/services/game/src/include/SDL_name.h b/game/src/include/SDL_name.h similarity index 100% rename from services/game/src/include/SDL_name.h rename to game/src/include/SDL_name.h diff --git a/services/game/src/include/SDL_opengl.h b/game/src/include/SDL_opengl.h similarity index 100% rename from services/game/src/include/SDL_opengl.h rename to game/src/include/SDL_opengl.h diff --git a/services/game/src/include/SDL_opengl_glext.h b/game/src/include/SDL_opengl_glext.h similarity index 100% rename from services/game/src/include/SDL_opengl_glext.h rename to game/src/include/SDL_opengl_glext.h diff --git a/services/game/src/include/SDL_opengles.h b/game/src/include/SDL_opengles.h similarity index 100% rename from services/game/src/include/SDL_opengles.h rename to game/src/include/SDL_opengles.h diff --git a/services/game/src/include/SDL_opengles2.h b/game/src/include/SDL_opengles2.h similarity index 100% rename from services/game/src/include/SDL_opengles2.h rename to game/src/include/SDL_opengles2.h diff --git a/services/game/src/include/SDL_opengles2_gl2.h b/game/src/include/SDL_opengles2_gl2.h similarity index 100% rename from services/game/src/include/SDL_opengles2_gl2.h rename to game/src/include/SDL_opengles2_gl2.h diff --git a/services/game/src/include/SDL_opengles2_gl2ext.h b/game/src/include/SDL_opengles2_gl2ext.h similarity index 100% rename from services/game/src/include/SDL_opengles2_gl2ext.h rename to game/src/include/SDL_opengles2_gl2ext.h diff --git a/services/game/src/include/SDL_opengles2_gl2platform.h 
b/game/src/include/SDL_opengles2_gl2platform.h similarity index 100% rename from services/game/src/include/SDL_opengles2_gl2platform.h rename to game/src/include/SDL_opengles2_gl2platform.h diff --git a/services/game/src/include/SDL_opengles2_khrplatform.h b/game/src/include/SDL_opengles2_khrplatform.h similarity index 100% rename from services/game/src/include/SDL_opengles2_khrplatform.h rename to game/src/include/SDL_opengles2_khrplatform.h diff --git a/services/game/src/include/SDL_pixels.h b/game/src/include/SDL_pixels.h similarity index 100% rename from services/game/src/include/SDL_pixels.h rename to game/src/include/SDL_pixels.h diff --git a/services/game/src/include/SDL_platform.h b/game/src/include/SDL_platform.h similarity index 100% rename from services/game/src/include/SDL_platform.h rename to game/src/include/SDL_platform.h diff --git a/services/game/src/include/SDL_power.h b/game/src/include/SDL_power.h similarity index 100% rename from services/game/src/include/SDL_power.h rename to game/src/include/SDL_power.h diff --git a/services/game/src/include/SDL_quit.h b/game/src/include/SDL_quit.h similarity index 100% rename from services/game/src/include/SDL_quit.h rename to game/src/include/SDL_quit.h diff --git a/services/game/src/include/SDL_rect.h b/game/src/include/SDL_rect.h similarity index 100% rename from services/game/src/include/SDL_rect.h rename to game/src/include/SDL_rect.h diff --git a/services/game/src/include/SDL_render.h b/game/src/include/SDL_render.h similarity index 100% rename from services/game/src/include/SDL_render.h rename to game/src/include/SDL_render.h diff --git a/services/game/src/include/SDL_revision.h b/game/src/include/SDL_revision.h similarity index 100% rename from services/game/src/include/SDL_revision.h rename to game/src/include/SDL_revision.h diff --git a/services/game/src/include/SDL_rwops.h b/game/src/include/SDL_rwops.h similarity index 100% rename from services/game/src/include/SDL_rwops.h rename to game/src/include/SDL_rwops.h diff --git a/services/game/src/include/SDL_scancode.h b/game/src/include/SDL_scancode.h similarity index 100% rename from services/game/src/include/SDL_scancode.h rename to game/src/include/SDL_scancode.h diff --git a/services/game/src/include/SDL_sensor.h b/game/src/include/SDL_sensor.h similarity index 100% rename from services/game/src/include/SDL_sensor.h rename to game/src/include/SDL_sensor.h diff --git a/services/game/src/include/SDL_shape.h b/game/src/include/SDL_shape.h similarity index 100% rename from services/game/src/include/SDL_shape.h rename to game/src/include/SDL_shape.h diff --git a/services/game/src/include/SDL_stdinc.h b/game/src/include/SDL_stdinc.h similarity index 100% rename from services/game/src/include/SDL_stdinc.h rename to game/src/include/SDL_stdinc.h diff --git a/services/game/src/include/SDL_surface.h b/game/src/include/SDL_surface.h similarity index 100% rename from services/game/src/include/SDL_surface.h rename to game/src/include/SDL_surface.h diff --git a/services/game/src/include/SDL_system.h b/game/src/include/SDL_system.h similarity index 100% rename from services/game/src/include/SDL_system.h rename to game/src/include/SDL_system.h diff --git a/services/game/src/include/SDL_syswm.h b/game/src/include/SDL_syswm.h similarity index 100% rename from services/game/src/include/SDL_syswm.h rename to game/src/include/SDL_syswm.h diff --git a/services/game/src/include/SDL_thread.h b/game/src/include/SDL_thread.h similarity index 100% rename from services/game/src/include/SDL_thread.h 
rename to game/src/include/SDL_thread.h diff --git a/services/game/src/include/SDL_timer.h b/game/src/include/SDL_timer.h similarity index 100% rename from services/game/src/include/SDL_timer.h rename to game/src/include/SDL_timer.h diff --git a/services/game/src/include/SDL_touch.h b/game/src/include/SDL_touch.h similarity index 100% rename from services/game/src/include/SDL_touch.h rename to game/src/include/SDL_touch.h diff --git a/services/game/src/include/SDL_types.h b/game/src/include/SDL_types.h similarity index 100% rename from services/game/src/include/SDL_types.h rename to game/src/include/SDL_types.h diff --git a/services/game/src/include/SDL_version.h b/game/src/include/SDL_version.h similarity index 100% rename from services/game/src/include/SDL_version.h rename to game/src/include/SDL_version.h diff --git a/services/game/src/include/SDL_video.h b/game/src/include/SDL_video.h similarity index 100% rename from services/game/src/include/SDL_video.h rename to game/src/include/SDL_video.h diff --git a/services/game/src/include/SDL_vulkan.h b/game/src/include/SDL_vulkan.h similarity index 100% rename from services/game/src/include/SDL_vulkan.h rename to game/src/include/SDL_vulkan.h diff --git a/services/game/src/include/begin_code.h b/game/src/include/begin_code.h similarity index 100% rename from services/game/src/include/begin_code.h rename to game/src/include/begin_code.h diff --git a/services/game/src/include/close_code.h b/game/src/include/close_code.h similarity index 100% rename from services/game/src/include/close_code.h rename to game/src/include/close_code.h diff --git a/services/game/src/include/zconf.h b/game/src/include/zconf.h similarity index 100% rename from services/game/src/include/zconf.h rename to game/src/include/zconf.h diff --git a/services/game/src/include/zlib.h b/game/src/include/zlib.h similarity index 100% rename from services/game/src/include/zlib.h rename to game/src/include/zlib.h diff --git a/services/game/src/native/Makefile b/game/src/native/Makefile similarity index 100% rename from services/game/src/native/Makefile rename to game/src/native/Makefile diff --git a/services/game/src/native/engine/keepalive b/game/src/native/engine/keepalive similarity index 100% rename from services/game/src/native/engine/keepalive rename to game/src/native/engine/keepalive diff --git a/services/game/src/native/fpsgame/keepalive b/game/src/native/fpsgame/keepalive similarity index 100% rename from services/game/src/native/fpsgame/keepalive rename to game/src/native/fpsgame/keepalive diff --git a/services/game/src/native/shared/keepalive b/game/src/native/shared/keepalive similarity index 100% rename from services/game/src/native/shared/keepalive rename to game/src/native/shared/keepalive diff --git a/services/game/src/readme_source.txt b/game/src/readme_source.txt similarity index 100% rename from services/game/src/readme_source.txt rename to game/src/readme_source.txt diff --git a/services/game/src/shared/command.h b/game/src/shared/command.h similarity index 100% rename from services/game/src/shared/command.h rename to game/src/shared/command.h diff --git a/services/game/src/shared/crypto.cpp b/game/src/shared/crypto.cpp similarity index 100% rename from services/game/src/shared/crypto.cpp rename to game/src/shared/crypto.cpp diff --git a/services/game/src/shared/cube.h b/game/src/shared/cube.h similarity index 100% rename from services/game/src/shared/cube.h rename to game/src/shared/cube.h diff --git a/services/game/src/shared/cube2font.c 
b/game/src/shared/cube2font.c similarity index 100% rename from services/game/src/shared/cube2font.c rename to game/src/shared/cube2font.c diff --git a/services/game/src/shared/ents.h b/game/src/shared/ents.h similarity index 100% rename from services/game/src/shared/ents.h rename to game/src/shared/ents.h diff --git a/services/game/src/shared/geom.cpp b/game/src/shared/geom.cpp similarity index 100% rename from services/game/src/shared/geom.cpp rename to game/src/shared/geom.cpp diff --git a/services/game/src/shared/geom.h b/game/src/shared/geom.h similarity index 100% rename from services/game/src/shared/geom.h rename to game/src/shared/geom.h diff --git a/services/game/src/shared/glemu.cpp b/game/src/shared/glemu.cpp similarity index 100% rename from services/game/src/shared/glemu.cpp rename to game/src/shared/glemu.cpp diff --git a/services/game/src/shared/glemu.h b/game/src/shared/glemu.h similarity index 100% rename from services/game/src/shared/glemu.h rename to game/src/shared/glemu.h diff --git a/services/game/src/shared/glexts.h b/game/src/shared/glexts.h similarity index 100% rename from services/game/src/shared/glexts.h rename to game/src/shared/glexts.h diff --git a/services/game/src/shared/iengine.h b/game/src/shared/iengine.h similarity index 100% rename from services/game/src/shared/iengine.h rename to game/src/shared/iengine.h diff --git a/services/game/src/shared/igame.h b/game/src/shared/igame.h similarity index 100% rename from services/game/src/shared/igame.h rename to game/src/shared/igame.h diff --git a/services/game/src/shared/pch.cpp b/game/src/shared/pch.cpp similarity index 100% rename from services/game/src/shared/pch.cpp rename to game/src/shared/pch.cpp diff --git a/services/game/src/shared/stream.cpp b/game/src/shared/stream.cpp similarity index 100% rename from services/game/src/shared/stream.cpp rename to game/src/shared/stream.cpp diff --git a/services/game/src/shared/tools.cpp b/game/src/shared/tools.cpp similarity index 100% rename from services/game/src/shared/tools.cpp rename to game/src/shared/tools.cpp diff --git a/services/game/src/shared/tools.h b/game/src/shared/tools.h similarity index 100% rename from services/game/src/shared/tools.h rename to game/src/shared/tools.h diff --git a/services/game/src/shared/zip.cpp b/game/src/shared/zip.cpp similarity index 100% rename from services/game/src/shared/zip.cpp rename to game/src/shared/zip.cpp diff --git a/services/game/src/web/.agignore b/game/src/web/.agignore similarity index 100% rename from services/game/src/web/.agignore rename to game/src/web/.agignore diff --git a/services/game/src/web/engine/keepalive b/game/src/web/engine/keepalive similarity index 100% rename from services/game/src/web/engine/keepalive rename to game/src/web/engine/keepalive diff --git a/services/game/src/web/fpsgame/keepalive b/game/src/web/fpsgame/keepalive similarity index 100% rename from services/game/src/web/fpsgame/keepalive rename to game/src/web/fpsgame/keepalive diff --git a/services/game/src/web/shared/keepalive b/game/src/web/shared/keepalive similarity index 100% rename from services/game/src/web/shared/keepalive rename to game/src/web/shared/keepalive diff --git a/services/game/websocket_hook.patch b/game/websocket_hook.patch similarity index 100% rename from services/game/websocket_hook.patch rename to game/websocket_hook.patch diff --git a/gitpod/Dockerfile b/gitpod/Dockerfile deleted file mode 100644 index b422369bc..000000000 --- a/gitpod/Dockerfile +++ /dev/null @@ -1,38 +0,0 @@ -FROM gitpod/workspace-full 
- -RUN sudo apt-get update && \ - sudo apt-get install -o Dpkg::Options::="--force-confold" -qqy \ - build-essential \ - cmake \ - graphviz \ - imagemagick \ - inotify-tools \ - libenet-dev \ - nginx \ - socat \ - swig \ - unrar \ - valgrind \ - zlib1g-dev \ - && \ - sudo rm -rf /var/lib/apt/lists/* - -RUN cd /tmp && \ - wget https://github.com/redis/redis/archive/7.0.7.tar.gz && \ - tar xvf 7.0.7.tar.gz && \ - cd redis-7.0.7 && \ - sudo make install - -RUN cd /home/gitpod && \ - git clone https://github.com/emscripten-core/emsdk.git && \ - cd emsdk && \ - ./emsdk install 3.1.8 && \ - ./emsdk activate 3.1.8 && \ - bash -c 'source /home/gitpod/emsdk/emsdk_env.sh && npm install -g yarn@1.22.5 prettier' && \ - echo 'source "/home/gitpod/emsdk/emsdk_env.sh"' >> /home/gitpod/.bashrc - -RUN /bin/bash -c 'source /home/gitpod/.nvm/nvm.sh && nvm install 14.17.5 && nvm alias default 14.17.5' && \ - echo 'source "/home/gitpod/.nvm/nvm.sh"' >> /home/gitpod/.bashrc - -RUN go install cuelang.org/go/cmd/cue@latest -ENV EMSDK_QUIET 1 diff --git a/services/go/go.mod b/go.mod similarity index 91% rename from services/go/go.mod rename to go.mod index 2daa47382..5c268b871 100644 --- a/services/go/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/cfoust/sour -go 1.18 +go 1.22.0 require ( github.com/cespare/xxhash/v2 v2.1.2 // indirect @@ -16,11 +16,11 @@ require ( github.com/mattn/go-isatty v0.0.16 // indirect github.com/mattn/go-sqlite3 v1.14.16 // indirect github.com/mileusna/useragent v1.2.1 // indirect - github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 // indirect + github.com/petermattis/goid v0.0.0-20241025130422-66cb2e6d7274 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/repeale/fp-go v0.11.1 // indirect github.com/rs/zerolog v1.28.0 // indirect - github.com/sasha-s/go-deadlock v0.3.1 // indirect + github.com/sasha-s/go-deadlock v0.3.5 // indirect github.com/stretchr/objx v0.5.0 // indirect github.com/stretchr/testify v1.8.2 // indirect github.com/x448/float16 v0.8.4 // indirect diff --git a/services/go/go.sum b/go.sum similarity index 94% rename from services/go/go.sum rename to go.sum index ecd2d034f..6774a7ffc 100644 --- a/services/go/go.sum +++ b/go.sum @@ -54,6 +54,9 @@ github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 h1:hDSdbBuw3Lefr6R18ax0tZ2BJeNB3NehB3trOwYBsdU= github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= +github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= +github.com/petermattis/goid v0.0.0-20241025130422-66cb2e6d7274 h1:qli3BGQK0tYDkSEvZ/FzZTi9ZrOX86Q6CIhKLGc489A= +github.com/petermattis/goid v0.0.0-20241025130422-66cb2e6d7274/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -64,6 +67,8 @@ github.com/rs/zerolog v1.28.0 h1:MirSo27VyNi7RJYP3078AA1+Cyzd2GB66qy3aUHvsWY= github.com/rs/zerolog v1.28.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0= github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= 
github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= +github.com/sasha-s/go-deadlock v0.3.5 h1:tNCOEEDG6tBqrNDOX35j/7hL5FcFViG6awUGROb2NsU= +github.com/sasha-s/go-deadlock v0.3.5/go.mod h1:bugP6EGbdGYObIlx7pUZtWqlvo8k9H6vCBBsiChJQ5U= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= diff --git a/services/go/pkg/assets/cache.go b/pkg/assets/cache.go similarity index 100% rename from services/go/pkg/assets/cache.go rename to pkg/assets/cache.go diff --git a/services/go/pkg/assets/module.go b/pkg/assets/module.go similarity index 97% rename from services/go/pkg/assets/module.go rename to pkg/assets/module.go index ee0ff4225..48178c390 100644 --- a/services/go/pkg/assets/module.go +++ b/pkg/assets/module.go @@ -21,7 +21,7 @@ type Job struct { type AssetFetcher struct { assets chan Job bundles chan Job - roots []*RemoteRoot + roots []*PackagedRoot cache Store } @@ -31,9 +31,9 @@ func NewAssetFetcher(ctx context.Context, cache Store, roots []string, onlyMaps return nil, err } - remotes := make([]*RemoteRoot, 0) + remotes := make([]*PackagedRoot, 0) for _, root := range loaded { - if remote, ok := root.(*RemoteRoot); ok { + if remote, ok := root.(*PackagedRoot); ok { remotes = append(remotes, remote) } } @@ -174,7 +174,7 @@ func (m *AssetFetcher) fetchBundle(ctx context.Context, id string) ([]byte, erro type FoundMap struct { Map *SlimMap - Root *RemoteRoot + Root *PackagedRoot fetch *AssetFetcher } diff --git a/services/go/pkg/assets/roots.go b/pkg/assets/roots.go similarity index 69% rename from services/go/pkg/assets/roots.go rename to pkg/assets/roots.go index c5e664b31..a2b882473 100644 --- a/services/go/pkg/assets/roots.go +++ b/pkg/assets/roots.go @@ -46,10 +46,106 @@ func (f FSRoot) Reference(ctx context.Context, path string) (string, error) { return fmt.Sprintf("fs:%s", f.getPath(path)), nil } -type RemoteRoot struct { - cache Store +type packageReader interface { + Index(ctx context.Context) ([]byte, error) + Read(ctx context.Context, id string) ([]byte, error) +} + +type remoteReader struct { + indexURL string + assetURL string + cache Store + shouldCache bool +} + +var _ packageReader = (*remoteReader)(nil) + +func (r *remoteReader) Index(ctx context.Context) ([]byte, error) { + urlHash := fmt.Sprintf("%x", sha256.Sum256([]byte(r.indexURL))) + + data, err := r.cache.Get(ctx, urlHash) + if err == nil { + return data, nil + } + + if err != Missing { + return nil, err + } + + data, err = DownloadBytes(r.indexURL) + if err != nil { + return nil, err + } + + if r.shouldCache { + err = r.cache.Set(ctx, urlHash, data) + if err != nil { + return nil, err + } + } + + return data, nil +} + +func (r *remoteReader) Read(ctx context.Context, id string) ([]byte, error) { + data, err := r.cache.Get(ctx, id) + if err == nil { + return data, nil + } + + if err != Missing { + return nil, err + } + + url := fmt.Sprintf("%s%s", r.assetURL, id) + data, err = DownloadBytes(url) + if err != nil { + return nil, err + } + + if r.shouldCache { + err = r.cache.Set(ctx, id, data) + if err != nil { + return nil, err + } + } + + return data, nil +} + +func NewRemoteReader( + url string, + cache Store, + shouldCache bool, +) *remoteReader { + return &remoteReader{ + indexURL: url, + assetURL: CleanSourcePath(url), + cache: cache, + shouldCache: shouldCache, + } +} + +type 
fsReader struct { + indexPath string + assetPath string +} + +var _ packageReader = (*fsReader)(nil) + +func (f *fsReader) Index(ctx context.Context) ([]byte, error) { + return os.ReadFile(f.indexPath) +} + +func (f *fsReader) Read(ctx context.Context, id string) ([]byte, error) { + return os.ReadFile(filepath.Join(f.assetPath, id)) +} + +type PackagedRoot struct { source string - url string + + reader packageReader + // A path inside of the virtual FS to treat as the "root". base string @@ -72,44 +168,27 @@ type RemoteRoot struct { FS map[string]int } -func NewRemoteRoot( +func NewPackagedRoot( ctx context.Context, - cache Store, - url string, + reader packageReader, base string, - shouldCache bool, skip bool, -) (*RemoteRoot, error) { - urlHash := fmt.Sprintf("%x", sha256.Sum256([]byte(url))) - - indexData, err := cache.Get(ctx, urlHash) +) (*PackagedRoot, error) { + indexData, err := reader.Index(ctx) if err != nil { - if err != Missing { - return nil, err - } - - indexData, err = DownloadBytes(url) - if err != nil { - return nil, err - } - - if shouldCache { - err = cache.Set(ctx, urlHash, indexData) - if err != nil { - return nil, err - } - } + return nil, err } var index Index if err := cbor.Unmarshal(indexData, &index); err != nil { - return nil, fmt.Errorf("error decoding %s: %s", url, err) + return nil, fmt.Errorf( + "error decoding index: %s", + err, + ) } - root := RemoteRoot{ - cache: cache, - url: CleanSourcePath(url), - source: url, + root := PackagedRoot{ + reader: reader, base: base, skip: skip, } @@ -181,12 +260,12 @@ func NewRemoteRoot( return &root, nil } -func (f *RemoteRoot) Exists(ctx context.Context, path string) bool { +func (f *PackagedRoot) Exists(ctx context.Context, path string) bool { _, ok := f.FS[path] return ok } -func (f *RemoteRoot) GetID(index int) (string, error) { +func (f *PackagedRoot) GetID(index int) (string, error) { if id, ok := f.idLookup[index]; ok { return id, nil } @@ -194,7 +273,7 @@ func (f *RemoteRoot) GetID(index int) (string, error) { return "", Missing } -func (f *RemoteRoot) Reference(ctx context.Context, path string) (string, error) { +func (f *PackagedRoot) Reference(ctx context.Context, path string) (string, error) { index, ok := f.FS[path] if !ok { return "", Missing @@ -208,34 +287,15 @@ func (f *RemoteRoot) Reference(ctx context.Context, path string) (string, error) return fmt.Sprintf("id:%s", id), nil } -func (f *RemoteRoot) ReadAsset(ctx context.Context, id string) ([]byte, error) { +func (f *PackagedRoot) ReadAsset(ctx context.Context, id string) ([]byte, error) { if _, ok := f.assets[id]; !ok { return nil, Missing } - cacheData, err := f.cache.Get(ctx, id) - if err != nil && err != Missing { - return nil, err - } - if err == nil { - return cacheData, nil - } - - url := fmt.Sprintf("%s%s", f.url, id) - data, err := DownloadBytes(url) - if err != nil { - return nil, err - } - - err = f.cache.Set(ctx, id, data) - if err != nil { - return nil, err - } - - return data, nil + return f.reader.Read(ctx, id) } -func (f *RemoteRoot) ReadFile(ctx context.Context, path string) ([]byte, error) { +func (f *PackagedRoot) ReadFile(ctx context.Context, path string) ([]byte, error) { index, ok := f.FS[path] if !ok { return nil, Missing @@ -250,12 +310,12 @@ func (f *RemoteRoot) ReadFile(ctx context.Context, path string) ([]byte, error) } var _ Root = (*FSRoot)(nil) -var _ Root = (*RemoteRoot)(nil) +var _ Root = (*PackagedRoot)(nil) func LoadRoots(ctx context.Context, cache Store, targets []string, onlyMaps bool) ([]Root, error) { roots := 
make([]Root, 0) haveSkip := false - var skipRoot *RemoteRoot + var skipRoot *PackagedRoot for _, target := range targets { // Specify a base dir with @/base/dir @@ -282,6 +342,31 @@ func LoadRoots(ctx context.Context, cache Store, targets []string, onlyMaps bool target = target[1:] } + if strings.HasPrefix(target, "fs:") { + absolute, err := filepath.Abs(target[3:]) + if err != nil { + return nil, err + } + + reader := &fsReader{ + indexPath: absolute, + assetPath: filepath.Dir(absolute), + } + + root, err := NewPackagedRoot( + ctx, + reader, + base, + skip, + ) + if err != nil { + return nil, err + } + root.source = target + roots = append(roots, root) + continue + } + if !strings.HasPrefix(target, "http") { absolute, err := filepath.Abs(target) if err != nil { @@ -291,18 +376,24 @@ func LoadRoots(ctx context.Context, cache Store, targets []string, onlyMaps bool continue } - root, err := NewRemoteRoot( - ctx, - cache, + reader := NewRemoteReader( target, - base, + cache, shouldCache, + ) + + root, err := NewPackagedRoot( + ctx, + reader, + base, skip, ) if err != nil { return nil, err } + root.source = target + if skip { skipRoot = root } @@ -313,7 +404,7 @@ func LoadRoots(ctx context.Context, cache Store, targets []string, onlyMaps bool // root if haveSkip { for _, root := range roots { - remote, ok := root.(*RemoteRoot) + remote, ok := root.(*PackagedRoot) if !ok { continue } @@ -348,7 +439,7 @@ func LoadRoots(ctx context.Context, cache Store, targets []string, onlyMaps bool // First pass: note all of the assets used by maps mapAssets := make(map[string]struct{}) for _, root := range roots { - remote, ok := root.(*RemoteRoot) + remote, ok := root.(*PackagedRoot) if !ok { continue } @@ -364,7 +455,7 @@ func LoadRoots(ctx context.Context, cache Store, targets []string, onlyMaps bool // Second pass: clear out assets not used by maps for _, root := range roots { - remote, ok := root.(*RemoteRoot) + remote, ok := root.(*PackagedRoot) if !ok { continue } diff --git a/services/go/pkg/assets/types.go b/pkg/assets/types.go similarity index 100% rename from services/go/pkg/assets/types.go rename to pkg/assets/types.go diff --git a/services/go/pkg/assets/utils.go b/pkg/assets/utils.go similarity index 100% rename from services/go/pkg/assets/utils.go rename to pkg/assets/utils.go diff --git a/services/go/pkg/chanlock/LICENSE b/pkg/chanlock/LICENSE similarity index 100% rename from services/go/pkg/chanlock/LICENSE rename to pkg/chanlock/LICENSE diff --git a/services/go/pkg/chanlock/README.md b/pkg/chanlock/README.md similarity index 100% rename from services/go/pkg/chanlock/README.md rename to pkg/chanlock/README.md diff --git a/services/go/pkg/chanlock/module.go b/pkg/chanlock/module.go similarity index 100% rename from services/go/pkg/chanlock/module.go rename to pkg/chanlock/module.go diff --git a/services/go/pkg/chanlock/stacktraces.go b/pkg/chanlock/stacktraces.go similarity index 100% rename from services/go/pkg/chanlock/stacktraces.go rename to pkg/chanlock/stacktraces.go diff --git a/services/go/pkg/cs/command.cpp b/pkg/cs/command.cpp similarity index 100% rename from services/go/pkg/cs/command.cpp rename to pkg/cs/command.cpp diff --git a/services/go/pkg/cs/command.h b/pkg/cs/command.h similarity index 100% rename from services/go/pkg/cs/command.h rename to pkg/cs/command.h diff --git a/services/go/pkg/cs/cs.go b/pkg/cs/cs.go similarity index 100% rename from services/go/pkg/cs/cs.go rename to pkg/cs/cs.go diff --git a/services/go/pkg/cs/cs.swigcxx b/pkg/cs/cs.swigcxx similarity index 100% 
rename from services/go/pkg/cs/cs.swigcxx rename to pkg/cs/cs.swigcxx diff --git a/services/go/pkg/cs/wrap.h b/pkg/cs/wrap.h similarity index 100% rename from services/go/pkg/cs/wrap.h rename to pkg/cs/wrap.h diff --git a/services/go/pkg/enet/LICENSE b/pkg/enet/LICENSE similarity index 100% rename from services/go/pkg/enet/LICENSE rename to pkg/enet/LICENSE diff --git a/services/go/pkg/enet/README.md b/pkg/enet/README.md similarity index 100% rename from services/go/pkg/enet/README.md rename to pkg/enet/README.md diff --git a/services/go/pkg/enet/disconnect_reasons.go b/pkg/enet/disconnect_reasons.go similarity index 100% rename from services/go/pkg/enet/disconnect_reasons.go rename to pkg/enet/disconnect_reasons.go diff --git a/pkg/enet/enet/.gitignore b/pkg/enet/enet/.gitignore new file mode 100644 index 000000000..6d7cdbf89 --- /dev/null +++ b/pkg/enet/enet/.gitignore @@ -0,0 +1,70 @@ +# Potential build directories +[Aa][Rr][Mm]/ +[Aa][Rr][Mm]64/ +[Bb]in/ +[Dd]ebug/ +[Dd]ebugPublic/ +[Ll]og/ +[Ll]ogs/ +[Oo]bj/ +[Rr]elease/ +[Rr]eleases/ +[Ww][Ii][Nn]32/ +bld/ +build/ +builds/ +out/ +x64/ +x86/ + +# VS +.vs/ +.vscode/ +!.vscode/extensions.json +!.vscode/launch.json +!.vscode/settings.json +!.vscode/tasks.json + +# CMake +_deps +CMakeCache.txt +CMakeFiles +CMakeLists.txt.user +CMakeScripts +CMakeUserPresets.json +CTestTestfile.cmake +cmake_install.cmake +compile_commands.json +install_manifest.txt + +# Prerequisites +*.d + +# Object files +*.o +*.ko +*.obj +*.elf + +# Linker output +*.ilk +*.map +*.exp + +# Libraries +*.lib +*.a +*.la +*.lo + +# Shared objects +*.dll +*.so +*.so.* +*.dylib + +# Debug files +*.dSYM/ +*.su +*.idb +*.pdb diff --git a/pkg/enet/enet/CMakeLists.txt b/pkg/enet/enet/CMakeLists.txt new file mode 100644 index 000000000..2398ccc43 --- /dev/null +++ b/pkg/enet/enet/CMakeLists.txt @@ -0,0 +1,109 @@ +cmake_minimum_required(VERSION 2.8.12...3.20) + +project(enet) + +# The "configure" step. 
+include(CheckFunctionExists) +include(CheckStructHasMember) +include(CheckTypeSize) +check_function_exists("fcntl" HAS_FCNTL) +check_function_exists("poll" HAS_POLL) +check_function_exists("getaddrinfo" HAS_GETADDRINFO) +check_function_exists("getnameinfo" HAS_GETNAMEINFO) +check_function_exists("gethostbyname_r" HAS_GETHOSTBYNAME_R) +check_function_exists("gethostbyaddr_r" HAS_GETHOSTBYADDR_R) +check_function_exists("inet_pton" HAS_INET_PTON) +check_function_exists("inet_ntop" HAS_INET_NTOP) +check_struct_has_member("struct msghdr" "msg_flags" "sys/types.h;sys/socket.h" HAS_MSGHDR_FLAGS) +set(CMAKE_EXTRA_INCLUDE_FILES "sys/types.h" "sys/socket.h") +check_type_size("socklen_t" HAS_SOCKLEN_T BUILTIN_TYPES_ONLY) +unset(CMAKE_EXTRA_INCLUDE_FILES) +if(MSVC) + add_definitions(-W3) +else() + add_definitions(-Wno-error) +endif() + +if(HAS_FCNTL) + add_definitions(-DHAS_FCNTL=1) +endif() +if(HAS_POLL) + add_definitions(-DHAS_POLL=1) +endif() +if(HAS_GETNAMEINFO) + add_definitions(-DHAS_GETNAMEINFO=1) +endif() +if(HAS_GETADDRINFO) + add_definitions(-DHAS_GETADDRINFO=1) +endif() +if(HAS_GETHOSTBYNAME_R) + add_definitions(-DHAS_GETHOSTBYNAME_R=1) +endif() +if(HAS_GETHOSTBYADDR_R) + add_definitions(-DHAS_GETHOSTBYADDR_R=1) +endif() +if(HAS_INET_PTON) + add_definitions(-DHAS_INET_PTON=1) +endif() +if(HAS_INET_NTOP) + add_definitions(-DHAS_INET_NTOP=1) +endif() +if(HAS_MSGHDR_FLAGS) + add_definitions(-DHAS_MSGHDR_FLAGS=1) +endif() +if(HAS_SOCKLEN_T) + add_definitions(-DHAS_SOCKLEN_T=1) +endif() + +include_directories(${PROJECT_SOURCE_DIR}/include) + +set(INCLUDE_FILES_PREFIX include/enet) +set(INCLUDE_FILES + ${INCLUDE_FILES_PREFIX}/callbacks.h + ${INCLUDE_FILES_PREFIX}/enet.h + ${INCLUDE_FILES_PREFIX}/list.h + ${INCLUDE_FILES_PREFIX}/protocol.h + ${INCLUDE_FILES_PREFIX}/time.h + ${INCLUDE_FILES_PREFIX}/types.h + ${INCLUDE_FILES_PREFIX}/unix.h + ${INCLUDE_FILES_PREFIX}/utility.h + ${INCLUDE_FILES_PREFIX}/win32.h +) + +set(SOURCE_FILES + callbacks.c + compress.c + host.c + list.c + packet.c + peer.c + protocol.c + unix.c + win32.c) + +source_group(include FILES ${INCLUDE_FILES}) +source_group(source FILES ${SOURCE_FILES}) + +if(WIN32 AND BUILD_SHARED_LIBS AND (MSVC OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")) + add_definitions(-DENET_DLL=1) + add_definitions(-DENET_BUILDING_LIB) +endif() + +add_library(enet + ${INCLUDE_FILES} + ${SOURCE_FILES} +) + +if (WIN32) + target_link_libraries(enet winmm ws2_32) +endif() + +include(GNUInstallDirs) +install(TARGETS enet + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} +) +install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/include/enet + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} +) diff --git a/pkg/enet/enet/LICENSE b/pkg/enet/enet/LICENSE new file mode 100644 index 000000000..ec14b3f09 --- /dev/null +++ b/pkg/enet/enet/LICENSE @@ -0,0 +1,7 @@ +Copyright (c) 2002-2024 Lee Salzman + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/pkg/enet/enet/Makefile b/pkg/enet/enet/Makefile new file mode 100644 index 000000000..a2bc1af4a --- /dev/null +++ b/pkg/enet/enet/Makefile @@ -0,0 +1,22 @@ +CFLAGS=-O3 -fomit-frame-pointer +override CFLAGS:= $(CFLAGS) -Iinclude + +OBJS= \ + callbacks.o \ + compress.o \ + host.o \ + list.o \ + packet.o \ + peer.o \ + protocol.o \ + unix.o \ + win32.o + +libenet.a: $(OBJS) + $(AR) rcs $@ $(OBJS) + +default: libenet.a + +clean: + -$(RM) libenet.a $(OBJS) + diff --git a/pkg/enet/enet/Makefile.am b/pkg/enet/enet/Makefile.am new file mode 100644 index 000000000..ddd69cfb6 --- /dev/null +++ b/pkg/enet/enet/Makefile.am @@ -0,0 +1,22 @@ +pkgconfigdir = $(libdir)/pkgconfig +nodist_pkgconfig_DATA = libenet.pc + +enetincludedir=$(includedir)/enet +enetinclude_HEADERS = \ + include/enet/callbacks.h \ + include/enet/enet.h \ + include/enet/list.h \ + include/enet/protocol.h \ + include/enet/time.h \ + include/enet/types.h \ + include/enet/unix.h \ + include/enet/utility.h \ + include/enet/win32.h + +lib_LTLIBRARIES = libenet.la +libenet_la_SOURCES = callbacks.c compress.c host.c list.c packet.c peer.c protocol.c unix.c win32.c +# see info '(libtool) Updating version info' before making a release +libenet_la_LDFLAGS = $(AM_LDFLAGS) -version-info 7:6:0 +AM_CPPFLAGS = -I$(top_srcdir)/include + +ACLOCAL_AMFLAGS = -Im4 diff --git a/pkg/enet/enet/Makefile.in b/pkg/enet/enet/Makefile.in new file mode 100644 index 000000000..fa8d8b841 --- /dev/null +++ b/pkg/enet/enet/Makefile.in @@ -0,0 +1,936 @@ +# Makefile.in generated by automake 1.16.5 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2021 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + + + +VPATH = @srcdir@ +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = . +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/m4/libtool.m4 \ + $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ + $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(top_srcdir)/configure \ + $(am__configure_deps) $(enetinclude_HEADERS) \ + $(am__DIST_COMMON) +am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ + configure.lineno config.status.lineno +mkinstalldirs = $(install_sh) -d +CONFIG_CLEAN_FILES = libenet.pc +CONFIG_CLEAN_VPATH_FILES = +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__installdirs = "$(DESTDIR)$(libdir)" "$(DESTDIR)$(pkgconfigdir)" \ + "$(DESTDIR)$(enetincludedir)" +LTLIBRARIES = $(lib_LTLIBRARIES) +libenet_la_LIBADD = +am_libenet_la_OBJECTS = callbacks.lo compress.lo host.lo list.lo \ + packet.lo peer.lo protocol.lo unix.lo win32.lo +libenet_la_OBJECTS = $(am_libenet_la_OBJECTS) +AM_V_lt = $(am__v_lt_@AM_V@) +am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) +am__v_lt_0 = --silent +am__v_lt_1 = +libenet_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ + $(libenet_la_LDFLAGS) $(LDFLAGS) -o $@ +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +DEFAULT_INCLUDES = -I.@am__isrc@ +depcomp = $(SHELL) $(top_srcdir)/depcomp +am__maybe_remake_depfiles = depfiles +am__depfiles_remade = ./$(DEPDIR)/callbacks.Plo \ + ./$(DEPDIR)/compress.Plo ./$(DEPDIR)/host.Plo \ + ./$(DEPDIR)/list.Plo ./$(DEPDIR)/packet.Plo \ + ./$(DEPDIR)/peer.Plo ./$(DEPDIR)/protocol.Plo \ + ./$(DEPDIR)/unix.Plo ./$(DEPDIR)/win32.Plo +am__mv = mv -f +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ + $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ + $(AM_CFLAGS) $(CFLAGS) +AM_V_CC = $(am__v_CC_@AM_V@) +am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) +am__v_CC_0 = @echo " CC " $@; +am__v_CC_1 = +CCLD = $(CC) +LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ + $(AM_LDFLAGS) $(LDFLAGS) -o $@ +AM_V_CCLD = $(am__v_CCLD_@AM_V@) +am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) +am__v_CCLD_0 = @echo " CCLD " $@; +am__v_CCLD_1 = +SOURCES = $(libenet_la_SOURCES) +DIST_SOURCES = $(libenet_la_SOURCES) +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(nodist_pkgconfig_DATA) +HEADERS = $(enetinclude_HEADERS) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. 
+am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +AM_RECURSIVE_TARGETS = cscope +am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/libenet.pc.in \ + ChangeLog README compile config.guess config.sub depcomp \ + install-sh ltmain.sh missing +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +distdir = $(PACKAGE)-$(VERSION) +top_distdir = $(distdir) +am__remove_distdir = \ + if test -d "$(distdir)"; then \ + find "$(distdir)" -type d ! -perm -200 -exec chmod u+w {} ';' \ + && rm -rf "$(distdir)" \ + || { sleep 5 && rm -rf "$(distdir)"; }; \ + else :; fi +am__post_remove_distdir = $(am__remove_distdir) +DIST_ARCHIVES = $(distdir).tar.gz +GZIP_ENV = --best +DIST_TARGETS = dist-gzip +# Exists only to be overridden by the user if desired. +AM_DISTCHECK_DVI_TARGET = dvi +distuninstallcheck_listfiles = find . -type f -print +am__distuninstallcheck_listfiles = $(distuninstallcheck_listfiles) \ + | sed 's|^\./|$(prefix)/|' | grep -v '$(infodir)/dir$$' +distcleancheck_listfiles = find . -type f -print +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AR = @AR@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPPFLAGS = @CPPFLAGS@ +CSCOPE = @CSCOPE@ +CTAGS = @CTAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DLLTOOL = @DLLTOOL@ +DSYMUTIL = @DSYMUTIL@ +DUMPBIN = @DUMPBIN@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +ETAGS = @ETAGS@ +EXEEXT = @EXEEXT@ +FGREP = @FGREP@ +FILECMD = @FILECMD@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LD = @LD@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIPO = @LIPO@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ +MAKEINFO = @MAKEINFO@ +MANIFEST_TOOL = @MANIFEST_TOOL@ +MKDIR_P = @MKDIR_P@ +NM = @NM@ +NMEDIT = @NMEDIT@ +OBJDUMP = @OBJDUMP@ +OBJEXT = @OBJEXT@ +OTOOL = @OTOOL@ +OTOOL64 = @OTOOL64@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +RANLIB = @RANLIB@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +STRIP = @STRIP@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_AR = @ac_ct_AR@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = 
@libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +runstatedir = @runstatedir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +pkgconfigdir = $(libdir)/pkgconfig +nodist_pkgconfig_DATA = libenet.pc +enetincludedir = $(includedir)/enet +enetinclude_HEADERS = \ + include/enet/callbacks.h \ + include/enet/enet.h \ + include/enet/list.h \ + include/enet/protocol.h \ + include/enet/time.h \ + include/enet/types.h \ + include/enet/unix.h \ + include/enet/utility.h \ + include/enet/win32.h + +lib_LTLIBRARIES = libenet.la +libenet_la_SOURCES = callbacks.c compress.c host.c list.c packet.c peer.c protocol.c unix.c win32.c +# see info '(libtool) Updating version info' before making a release +libenet_la_LDFLAGS = $(AM_LDFLAGS) -version-info 7:6:0 +AM_CPPFLAGS = -I$(top_srcdir)/include +ACLOCAL_AMFLAGS = -Im4 +all: all-am + +.SUFFIXES: +.SUFFIXES: .c .lo .o .obj +am--refresh: Makefile + @: +$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + echo ' cd $(srcdir) && $(AUTOMAKE) --foreign'; \ + $(am__cd) $(srcdir) && $(AUTOMAKE) --foreign \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + echo ' $(SHELL) ./config.status'; \ + $(SHELL) ./config.status;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__maybe_remake_depfiles)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__maybe_remake_depfiles);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + $(SHELL) ./config.status --recheck + +$(top_srcdir)/configure: $(am__configure_deps) + $(am__cd) $(srcdir) && $(AUTOCONF) +$(ACLOCAL_M4): $(am__aclocal_m4_deps) + $(am__cd) $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS) +$(am__aclocal_m4_deps): +libenet.pc: $(top_builddir)/config.status $(srcdir)/libenet.pc.in + cd $(top_builddir) && $(SHELL) ./config.status $@ + +install-libLTLIBRARIES: $(lib_LTLIBRARIES) + @$(NORMAL_INSTALL) + @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ + list2=; for p in $$list; do \ + if test -f $$p; then \ + list2="$$list2 $$p"; \ + else :; fi; \ + done; \ + test -z "$$list2" || { \ + echo " $(MKDIR_P) '$(DESTDIR)$(libdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(libdir)" || exit 1; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \ + } + +uninstall-libLTLIBRARIES: + @$(NORMAL_UNINSTALL) + @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ + for p in $$list; do \ + $(am__strip_dir) \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \ + done + +clean-libLTLIBRARIES: + -test -z 
"$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES) + @list='$(lib_LTLIBRARIES)'; \ + locs=`for p in $$list; do echo $$p; done | \ + sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ + sort -u`; \ + test -z "$$locs" || { \ + echo rm -f $${locs}; \ + rm -f $${locs}; \ + } + +libenet.la: $(libenet_la_OBJECTS) $(libenet_la_DEPENDENCIES) $(EXTRA_libenet_la_DEPENDENCIES) + $(AM_V_CCLD)$(libenet_la_LINK) -rpath $(libdir) $(libenet_la_OBJECTS) $(libenet_la_LIBADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/callbacks.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/compress.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/host.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/list.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/packet.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/peer.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/protocol.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/unix.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/win32.Plo@am__quote@ # am--include-marker + +$(am__depfiles_remade): + @$(MKDIR_P) $(@D) + @echo '# dummy' >$@-t && $(am__mv) $@-t $@ + +am--depfiles: $(am__depfiles_remade) + +.c.o: +@am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $< + +.c.obj: +@am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` +@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'` + +.c.lo: +@am__fastdepCC_TRUE@ $(AM_V_CC)$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $< + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +distclean-libtool: + -rm -f libtool config.lt +install-nodist_pkgconfigDATA: $(nodist_pkgconfig_DATA) + @$(NORMAL_INSTALL) + @list='$(nodist_pkgconfig_DATA)'; test -n "$(pkgconfigdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(pkgconfigdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(pkgconfigdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgconfigdir)'"; \ + $(INSTALL_DATA) $$files 
"$(DESTDIR)$(pkgconfigdir)" || exit $$?; \ + done + +uninstall-nodist_pkgconfigDATA: + @$(NORMAL_UNINSTALL) + @list='$(nodist_pkgconfig_DATA)'; test -n "$(pkgconfigdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(pkgconfigdir)'; $(am__uninstall_files_from_dir) +install-enetincludeHEADERS: $(enetinclude_HEADERS) + @$(NORMAL_INSTALL) + @list='$(enetinclude_HEADERS)'; test -n "$(enetincludedir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(enetincludedir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(enetincludedir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(enetincludedir)'"; \ + $(INSTALL_HEADER) $$files "$(DESTDIR)$(enetincludedir)" || exit $$?; \ + done + +uninstall-enetincludeHEADERS: + @$(NORMAL_UNINSTALL) + @list='$(enetinclude_HEADERS)'; test -n "$(enetincludedir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(enetincludedir)'; $(am__uninstall_files_from_dir) + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-am +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-am + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscope: cscope.files + test ! 
-s cscope.files \ + || $(CSCOPE) -b -q $(AM_CSCOPEFLAGS) $(CSCOPEFLAGS) -i cscope.files $(CSCOPE_ARGS) +clean-cscope: + -rm -f cscope.files +cscope.files: clean-cscope cscopelist +cscopelist: cscopelist-am + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + -rm -f cscope.out cscope.in.out cscope.po.out cscope.files +distdir: $(BUILT_SOURCES) + $(MAKE) $(AM_MAKEFLAGS) distdir-am + +distdir-am: $(DISTFILES) + $(am__remove_distdir) + test -d "$(distdir)" || mkdir "$(distdir)" + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done + -test -n "$(am__skip_mode_fix)" \ + || find "$(distdir)" -type d ! -perm -755 \ + -exec chmod u+rwx,go+rx {} \; -o \ + ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \ + ! -type d ! -perm -400 -exec chmod a+r {} \; -o \ + ! -type d ! -perm -444 -exec $(install_sh) -c -m a+r {} {} \; \ + || chmod -R a+r "$(distdir)" +dist-gzip: distdir + tardir=$(distdir) && $(am__tar) | eval GZIP= gzip $(GZIP_ENV) -c >$(distdir).tar.gz + $(am__post_remove_distdir) + +dist-bzip2: distdir + tardir=$(distdir) && $(am__tar) | BZIP2=$${BZIP2--9} bzip2 -c >$(distdir).tar.bz2 + $(am__post_remove_distdir) + +dist-lzip: distdir + tardir=$(distdir) && $(am__tar) | lzip -c $${LZIP_OPT--9} >$(distdir).tar.lz + $(am__post_remove_distdir) + +dist-xz: distdir + tardir=$(distdir) && $(am__tar) | XZ_OPT=$${XZ_OPT--e} xz -c >$(distdir).tar.xz + $(am__post_remove_distdir) + +dist-zstd: distdir + tardir=$(distdir) && $(am__tar) | zstd -c $${ZSTD_CLEVEL-$${ZSTD_OPT--19}} >$(distdir).tar.zst + $(am__post_remove_distdir) + +dist-tarZ: distdir + @echo WARNING: "Support for distribution archives compressed with" \ + "legacy program 'compress' is deprecated." >&2 + @echo WARNING: "It will be removed altogether in Automake 2.0" >&2 + tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z + $(am__post_remove_distdir) + +dist-shar: distdir + @echo WARNING: "Support for shar distribution archives is" \ + "deprecated." 
>&2 + @echo WARNING: "It will be removed altogether in Automake 2.0" >&2 + shar $(distdir) | eval GZIP= gzip $(GZIP_ENV) -c >$(distdir).shar.gz + $(am__post_remove_distdir) + +dist-zip: distdir + -rm -f $(distdir).zip + zip -rq $(distdir).zip $(distdir) + $(am__post_remove_distdir) + +dist dist-all: + $(MAKE) $(AM_MAKEFLAGS) $(DIST_TARGETS) am__post_remove_distdir='@:' + $(am__post_remove_distdir) + +# This target untars the dist file and tries a VPATH configuration. Then +# it guarantees that the distribution is self-contained by making another +# tarfile. +distcheck: dist + case '$(DIST_ARCHIVES)' in \ + *.tar.gz*) \ + eval GZIP= gzip $(GZIP_ENV) -dc $(distdir).tar.gz | $(am__untar) ;;\ + *.tar.bz2*) \ + bzip2 -dc $(distdir).tar.bz2 | $(am__untar) ;;\ + *.tar.lz*) \ + lzip -dc $(distdir).tar.lz | $(am__untar) ;;\ + *.tar.xz*) \ + xz -dc $(distdir).tar.xz | $(am__untar) ;;\ + *.tar.Z*) \ + uncompress -c $(distdir).tar.Z | $(am__untar) ;;\ + *.shar.gz*) \ + eval GZIP= gzip $(GZIP_ENV) -dc $(distdir).shar.gz | unshar ;;\ + *.zip*) \ + unzip $(distdir).zip ;;\ + *.tar.zst*) \ + zstd -dc $(distdir).tar.zst | $(am__untar) ;;\ + esac + chmod -R a-w $(distdir) + chmod u+w $(distdir) + mkdir $(distdir)/_build $(distdir)/_build/sub $(distdir)/_inst + chmod a-w $(distdir) + test -d $(distdir)/_build || exit 0; \ + dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \ + && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \ + && am__cwd=`pwd` \ + && $(am__cd) $(distdir)/_build/sub \ + && ../../configure \ + $(AM_DISTCHECK_CONFIGURE_FLAGS) \ + $(DISTCHECK_CONFIGURE_FLAGS) \ + --srcdir=../.. --prefix="$$dc_install_base" \ + && $(MAKE) $(AM_MAKEFLAGS) \ + && $(MAKE) $(AM_MAKEFLAGS) $(AM_DISTCHECK_DVI_TARGET) \ + && $(MAKE) $(AM_MAKEFLAGS) check \ + && $(MAKE) $(AM_MAKEFLAGS) install \ + && $(MAKE) $(AM_MAKEFLAGS) installcheck \ + && $(MAKE) $(AM_MAKEFLAGS) uninstall \ + && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \ + distuninstallcheck \ + && chmod -R a-w "$$dc_install_base" \ + && ({ \ + (cd ../.. && umask 077 && mkdir "$$dc_destdir") \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \ + distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \ + } || { rm -rf "$$dc_destdir"; exit 1; }) \ + && rm -rf "$$dc_destdir" \ + && $(MAKE) $(AM_MAKEFLAGS) dist \ + && rm -rf $(DIST_ARCHIVES) \ + && $(MAKE) $(AM_MAKEFLAGS) distcleancheck \ + && cd "$$am__cwd" \ + || exit 1 + $(am__post_remove_distdir) + @(echo "$(distdir) archives ready for distribution: "; \ + list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \ + sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x' +distuninstallcheck: + @test -n '$(distuninstallcheck_dir)' || { \ + echo 'ERROR: trying to run $@ with an empty' \ + '$$(distuninstallcheck_dir)' >&2; \ + exit 1; \ + }; \ + $(am__cd) '$(distuninstallcheck_dir)' || { \ + echo 'ERROR: cannot chdir into $(distuninstallcheck_dir)' >&2; \ + exit 1; \ + }; \ + test `$(am__distuninstallcheck_listfiles) | wc -l` -eq 0 \ + || { echo "ERROR: files left after uninstall:" ; \ + if test -n "$(DESTDIR)"; then \ + echo " (check DESTDIR support)"; \ + fi ; \ + $(distuninstallcheck_listfiles) ; \ + exit 1; } >&2 +distcleancheck: distclean + @if test '$(srcdir)' = . 
; then \ + echo "ERROR: distcleancheck can only run from a VPATH build" ; \ + exit 1 ; \ + fi + @test `$(distcleancheck_listfiles) | wc -l` -eq 0 \ + || { echo "ERROR: files left in build directory after distclean:" ; \ + $(distcleancheck_listfiles) ; \ + exit 1; } >&2 +check-am: all-am +check: check-am +all-am: Makefile $(LTLIBRARIES) $(DATA) $(HEADERS) +installdirs: + for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(pkgconfigdir)" "$(DESTDIR)$(enetincludedir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." +clean: clean-am + +clean-am: clean-generic clean-libLTLIBRARIES clean-libtool \ + mostlyclean-am + +distclean: distclean-am + -rm -f $(am__CONFIG_DISTCLEAN_FILES) + -rm -f ./$(DEPDIR)/callbacks.Plo + -rm -f ./$(DEPDIR)/compress.Plo + -rm -f ./$(DEPDIR)/host.Plo + -rm -f ./$(DEPDIR)/list.Plo + -rm -f ./$(DEPDIR)/packet.Plo + -rm -f ./$(DEPDIR)/peer.Plo + -rm -f ./$(DEPDIR)/protocol.Plo + -rm -f ./$(DEPDIR)/unix.Plo + -rm -f ./$(DEPDIR)/win32.Plo + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-libtool distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: install-enetincludeHEADERS \ + install-nodist_pkgconfigDATA + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: install-libLTLIBRARIES + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f $(am__CONFIG_DISTCLEAN_FILES) + -rm -rf $(top_srcdir)/autom4te.cache + -rm -f ./$(DEPDIR)/callbacks.Plo + -rm -f ./$(DEPDIR)/compress.Plo + -rm -f ./$(DEPDIR)/host.Plo + -rm -f ./$(DEPDIR)/list.Plo + -rm -f ./$(DEPDIR)/packet.Plo + -rm -f ./$(DEPDIR)/peer.Plo + -rm -f ./$(DEPDIR)/protocol.Plo + -rm -f ./$(DEPDIR)/unix.Plo + -rm -f ./$(DEPDIR)/win32.Plo + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-enetincludeHEADERS uninstall-libLTLIBRARIES \ + uninstall-nodist_pkgconfigDATA + +.MAKE: install-am install-strip + +.PHONY: CTAGS GTAGS TAGS all all-am am--depfiles am--refresh check \ + check-am clean clean-cscope clean-generic clean-libLTLIBRARIES \ + clean-libtool 
cscope cscopelist-am ctags ctags-am dist \ + dist-all dist-bzip2 dist-gzip dist-lzip dist-shar dist-tarZ \ + dist-xz dist-zip dist-zstd distcheck distclean \ + distclean-compile distclean-generic distclean-libtool \ + distclean-tags distcleancheck distdir distuninstallcheck dvi \ + dvi-am html html-am info info-am install install-am \ + install-data install-data-am install-dvi install-dvi-am \ + install-enetincludeHEADERS install-exec install-exec-am \ + install-html install-html-am install-info install-info-am \ + install-libLTLIBRARIES install-man \ + install-nodist_pkgconfigDATA install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-compile \ + mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ + tags tags-am uninstall uninstall-am \ + uninstall-enetincludeHEADERS uninstall-libLTLIBRARIES \ + uninstall-nodist_pkgconfigDATA + +.PRECIOUS: Makefile + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/pkg/enet/enet/README b/pkg/enet/enet/README new file mode 100644 index 000000000..3b6318b22 --- /dev/null +++ b/pkg/enet/enet/README @@ -0,0 +1,15 @@ +Please visit the ENet homepage at http://sauerbraten.org/enet/ for installation +and usage instructions. + +If you obtained this package from github, the quick description on how to build +is: + +# Generate the build system. + +autoreconf -vfi + +# Compile and install the library. + +./configure && make && make install + + diff --git a/pkg/enet/enet/aclocal.m4 b/pkg/enet/enet/aclocal.m4 new file mode 100644 index 000000000..08ea625f4 --- /dev/null +++ b/pkg/enet/enet/aclocal.m4 @@ -0,0 +1,1155 @@ +# generated automatically by aclocal 1.16.5 -*- Autoconf -*- + +# Copyright (C) 1996-2021 Free Software Foundation, Inc. + +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +m4_ifndef([AC_CONFIG_MACRO_DIRS], [m4_defun([_AM_CONFIG_MACRO_DIRS], [])m4_defun([AC_CONFIG_MACRO_DIRS], [_AM_CONFIG_MACRO_DIRS($@)])]) +m4_ifndef([AC_AUTOCONF_VERSION], + [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl +m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.71],, +[m4_warning([this file was generated for autoconf 2.71. +You have another version of autoconf. It may work, but is not guaranteed to. +If you have problems, you may need to regenerate the build system entirely. +To do so, use the procedure documented by the package, typically 'autoreconf'.])]) + +# Copyright (C) 2002-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_AUTOMAKE_VERSION(VERSION) +# ---------------------------- +# Automake X.Y traces this macro to ensure aclocal.m4 has been +# generated from the m4 files accompanying Automake X.Y. +# (This private macro should not be called outside this file.) 
+AC_DEFUN([AM_AUTOMAKE_VERSION], +[am__api_version='1.16' +dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to +dnl require some minimum version. Point them to the right macro. +m4_if([$1], [1.16.5], [], + [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl +]) + +# _AM_AUTOCONF_VERSION(VERSION) +# ----------------------------- +# aclocal traces this macro to find the Autoconf version. +# This is a private macro too. Using m4_define simplifies +# the logic in aclocal, which can simply ignore this definition. +m4_define([_AM_AUTOCONF_VERSION], []) + +# AM_SET_CURRENT_AUTOMAKE_VERSION +# ------------------------------- +# Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced. +# This function is AC_REQUIREd by AM_INIT_AUTOMAKE. +AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION], +[AM_AUTOMAKE_VERSION([1.16.5])dnl +m4_ifndef([AC_AUTOCONF_VERSION], + [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl +_AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))]) + +# AM_AUX_DIR_EXPAND -*- Autoconf -*- + +# Copyright (C) 2001-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# For projects using AC_CONFIG_AUX_DIR([foo]), Autoconf sets +# $ac_aux_dir to '$srcdir/foo'. In other projects, it is set to +# '$srcdir', '$srcdir/..', or '$srcdir/../..'. +# +# Of course, Automake must honor this variable whenever it calls a +# tool from the auxiliary directory. The problem is that $srcdir (and +# therefore $ac_aux_dir as well) can be either absolute or relative, +# depending on how configure is run. This is pretty annoying, since +# it makes $ac_aux_dir quite unusable in subdirectories: in the top +# source directory, any form will work fine, but in subdirectories a +# relative path needs to be adjusted first. +# +# $ac_aux_dir/missing +# fails when called from a subdirectory if $ac_aux_dir is relative +# $top_srcdir/$ac_aux_dir/missing +# fails if $ac_aux_dir is absolute, +# fails when called from a subdirectory in a VPATH build with +# a relative $ac_aux_dir +# +# The reason of the latter failure is that $top_srcdir and $ac_aux_dir +# are both prefixed by $srcdir. In an in-source build this is usually +# harmless because $srcdir is '.', but things will broke when you +# start a VPATH build or use an absolute $srcdir. +# +# So we could use something similar to $top_srcdir/$ac_aux_dir/missing, +# iff we strip the leading $srcdir from $ac_aux_dir. That would be: +# am_aux_dir='\$(top_srcdir)/'`expr "$ac_aux_dir" : "$srcdir//*\(.*\)"` +# and then we would define $MISSING as +# MISSING="\${SHELL} $am_aux_dir/missing" +# This will work as long as MISSING is not called from configure, because +# unfortunately $(top_srcdir) has no meaning in configure. +# However there are other variables, like CC, which are often used in +# configure, and could therefore not use this "fixed" $ac_aux_dir. +# +# Another solution, used here, is to always expand $ac_aux_dir to an +# absolute PATH. The drawback is that using absolute paths prevent a +# configured tree to be moved without reconfiguration. + +AC_DEFUN([AM_AUX_DIR_EXPAND], +[AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl +# Expand $ac_aux_dir to an absolute path. +am_aux_dir=`cd "$ac_aux_dir" && pwd` +]) + +# AM_CONDITIONAL -*- Autoconf -*- + +# Copyright (C) 1997-2021 Free Software Foundation, Inc. 
+# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_CONDITIONAL(NAME, SHELL-CONDITION) +# ------------------------------------- +# Define a conditional. +AC_DEFUN([AM_CONDITIONAL], +[AC_PREREQ([2.52])dnl + m4_if([$1], [TRUE], [AC_FATAL([$0: invalid condition: $1])], + [$1], [FALSE], [AC_FATAL([$0: invalid condition: $1])])dnl +AC_SUBST([$1_TRUE])dnl +AC_SUBST([$1_FALSE])dnl +_AM_SUBST_NOTMAKE([$1_TRUE])dnl +_AM_SUBST_NOTMAKE([$1_FALSE])dnl +m4_define([_AM_COND_VALUE_$1], [$2])dnl +if $2; then + $1_TRUE= + $1_FALSE='#' +else + $1_TRUE='#' + $1_FALSE= +fi +AC_CONFIG_COMMANDS_PRE( +[if test -z "${$1_TRUE}" && test -z "${$1_FALSE}"; then + AC_MSG_ERROR([[conditional "$1" was never defined. +Usually this means the macro was only invoked conditionally.]]) +fi])]) + +# Copyright (C) 1999-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + + +# There are a few dirty hacks below to avoid letting 'AC_PROG_CC' be +# written in clear, in which case automake, when reading aclocal.m4, +# will think it sees a *use*, and therefore will trigger all it's +# C support machinery. Also note that it means that autoscan, seeing +# CC etc. in the Makefile, will ask for an AC_PROG_CC use... + + +# _AM_DEPENDENCIES(NAME) +# ---------------------- +# See how the compiler implements dependency checking. +# NAME is "CC", "CXX", "OBJC", "OBJCXX", "UPC", or "GJC". +# We try a few techniques and use that to set a single cache variable. +# +# We don't AC_REQUIRE the corresponding AC_PROG_CC since the latter was +# modified to invoke _AM_DEPENDENCIES(CC); we would have a circular +# dependency, and given that the user is not expected to run this macro, +# just rely on AC_PROG_CC. +AC_DEFUN([_AM_DEPENDENCIES], +[AC_REQUIRE([AM_SET_DEPDIR])dnl +AC_REQUIRE([AM_OUTPUT_DEPENDENCY_COMMANDS])dnl +AC_REQUIRE([AM_MAKE_INCLUDE])dnl +AC_REQUIRE([AM_DEP_TRACK])dnl + +m4_if([$1], [CC], [depcc="$CC" am_compiler_list=], + [$1], [CXX], [depcc="$CXX" am_compiler_list=], + [$1], [OBJC], [depcc="$OBJC" am_compiler_list='gcc3 gcc'], + [$1], [OBJCXX], [depcc="$OBJCXX" am_compiler_list='gcc3 gcc'], + [$1], [UPC], [depcc="$UPC" am_compiler_list=], + [$1], [GCJ], [depcc="$GCJ" am_compiler_list='gcc3 gcc'], + [depcc="$$1" am_compiler_list=]) + +AC_CACHE_CHECK([dependency style of $depcc], + [am_cv_$1_dependencies_compiler_type], +[if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then + # We make a subdir and do the tests there. Otherwise we can end up + # making bogus files that we don't know about and never remove. For + # instance it was reported that on HP-UX the gcc test will end up + # making a dummy file named 'D' -- because '-MD' means "put the output + # in D". + rm -rf conftest.dir + mkdir conftest.dir + # Copy depcomp to subdir because otherwise we won't find it if we're + # using a relative directory. + cp "$am_depcomp" conftest.dir + cd conftest.dir + # We will build objects and dependencies in a subdirectory because + # it helps to detect inapplicable dependency modes. For instance + # both Tru64's cc and ICC support -MD to output dependencies as a + # side effect of compilation, but ICC will put the dependencies in + # the current directory while Tru64 will put them in the object + # directory. 
+ mkdir sub + + am_cv_$1_dependencies_compiler_type=none + if test "$am_compiler_list" = ""; then + am_compiler_list=`sed -n ['s/^#*\([a-zA-Z0-9]*\))$/\1/p'] < ./depcomp` + fi + am__universal=false + m4_case([$1], [CC], + [case " $depcc " in #( + *\ -arch\ *\ -arch\ *) am__universal=true ;; + esac], + [CXX], + [case " $depcc " in #( + *\ -arch\ *\ -arch\ *) am__universal=true ;; + esac]) + + for depmode in $am_compiler_list; do + # Setup a source with many dependencies, because some compilers + # like to wrap large dependency lists on column 80 (with \), and + # we should not choose a depcomp mode which is confused by this. + # + # We need to recreate these files for each test, as the compiler may + # overwrite some of them when testing with obscure command lines. + # This happens at least with the AIX C compiler. + : > sub/conftest.c + for i in 1 2 3 4 5 6; do + echo '#include "conftst'$i'.h"' >> sub/conftest.c + # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with + # Solaris 10 /bin/sh. + echo '/* dummy */' > sub/conftst$i.h + done + echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf + + # We check with '-c' and '-o' for the sake of the "dashmstdout" + # mode. It turns out that the SunPro C++ compiler does not properly + # handle '-M -o', and we need to detect this. Also, some Intel + # versions had trouble with output in subdirs. + am__obj=sub/conftest.${OBJEXT-o} + am__minus_obj="-o $am__obj" + case $depmode in + gcc) + # This depmode causes a compiler race in universal mode. + test "$am__universal" = false || continue + ;; + nosideeffect) + # After this tag, mechanisms are not by side-effect, so they'll + # only be used when explicitly requested. + if test "x$enable_dependency_tracking" = xyes; then + continue + else + break + fi + ;; + msvc7 | msvc7msys | msvisualcpp | msvcmsys) + # This compiler won't grok '-c -o', but also, the minuso test has + # not run yet. These depmodes are late enough in the game, and + # so weak that their functioning should not be impacted. + am__obj=conftest.${OBJEXT-o} + am__minus_obj= + ;; + none) break ;; + esac + if depmode=$depmode \ + source=sub/conftest.c object=$am__obj \ + depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ + $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ + >/dev/null 2>conftest.err && + grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && + grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && + grep $am__obj sub/conftest.Po > /dev/null 2>&1 && + ${MAKE-make} -s -f confmf > /dev/null 2>&1; then + # icc doesn't choke on unknown options, it will just issue warnings + # or remarks (even with -Werror). So we grep stderr for any message + # that says an option was ignored or not supported. + # When given -MP, icc 7.0 and 7.1 complain thusly: + # icc: Command line warning: ignoring option '-M'; no argument required + # The diagnosis changed in icc 8.0: + # icc: Command line remark: option '-MP' not supported + if (grep 'ignoring option' conftest.err || + grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else + am_cv_$1_dependencies_compiler_type=$depmode + break + fi + fi + done + + cd .. + rm -rf conftest.dir +else + am_cv_$1_dependencies_compiler_type=none +fi +]) +AC_SUBST([$1DEPMODE], [depmode=$am_cv_$1_dependencies_compiler_type]) +AM_CONDITIONAL([am__fastdep$1], [ + test "x$enable_dependency_tracking" != xno \ + && test "$am_cv_$1_dependencies_compiler_type" = gcc3]) +]) + + +# AM_SET_DEPDIR +# ------------- +# Choose a directory name for dependency files. 
+# This macro is AC_REQUIREd in _AM_DEPENDENCIES. +AC_DEFUN([AM_SET_DEPDIR], +[AC_REQUIRE([AM_SET_LEADING_DOT])dnl +AC_SUBST([DEPDIR], ["${am__leading_dot}deps"])dnl +]) + + +# AM_DEP_TRACK +# ------------ +AC_DEFUN([AM_DEP_TRACK], +[AC_ARG_ENABLE([dependency-tracking], [dnl +AS_HELP_STRING( + [--enable-dependency-tracking], + [do not reject slow dependency extractors]) +AS_HELP_STRING( + [--disable-dependency-tracking], + [speeds up one-time build])]) +if test "x$enable_dependency_tracking" != xno; then + am_depcomp="$ac_aux_dir/depcomp" + AMDEPBACKSLASH='\' + am__nodep='_no' +fi +AM_CONDITIONAL([AMDEP], [test "x$enable_dependency_tracking" != xno]) +AC_SUBST([AMDEPBACKSLASH])dnl +_AM_SUBST_NOTMAKE([AMDEPBACKSLASH])dnl +AC_SUBST([am__nodep])dnl +_AM_SUBST_NOTMAKE([am__nodep])dnl +]) + +# Generate code to set up dependency tracking. -*- Autoconf -*- + +# Copyright (C) 1999-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# _AM_OUTPUT_DEPENDENCY_COMMANDS +# ------------------------------ +AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS], +[{ + # Older Autoconf quotes --file arguments for eval, but not when files + # are listed without --file. Let's play safe and only enable the eval + # if we detect the quoting. + # TODO: see whether this extra hack can be removed once we start + # requiring Autoconf 2.70 or later. + AS_CASE([$CONFIG_FILES], + [*\'*], [eval set x "$CONFIG_FILES"], + [*], [set x $CONFIG_FILES]) + shift + # Used to flag and report bootstrapping failures. + am_rc=0 + for am_mf + do + # Strip MF so we end up with the name of the file. + am_mf=`AS_ECHO(["$am_mf"]) | sed -e 's/:.*$//'` + # Check whether this is an Automake generated Makefile which includes + # dependency-tracking related rules and includes. + # Grep'ing the whole file directly is not great: AIX grep has a line + # limit of 2048, but all sed's we know have understand at least 4000. + sed -n 's,^am--depfiles:.*,X,p' "$am_mf" | grep X >/dev/null 2>&1 \ + || continue + am_dirpart=`AS_DIRNAME(["$am_mf"])` + am_filepart=`AS_BASENAME(["$am_mf"])` + AM_RUN_LOG([cd "$am_dirpart" \ + && sed -e '/# am--include-marker/d' "$am_filepart" \ + | $MAKE -f - am--depfiles]) || am_rc=$? + done + if test $am_rc -ne 0; then + AC_MSG_FAILURE([Something went wrong bootstrapping makefile fragments + for automatic dependency tracking. If GNU make was not used, consider + re-running the configure script with MAKE="gmake" (or whatever is + necessary). You can also try re-running configure with the + '--disable-dependency-tracking' option to at least be able to build + the package (albeit without support for automatic dependency tracking).]) + fi + AS_UNSET([am_dirpart]) + AS_UNSET([am_filepart]) + AS_UNSET([am_mf]) + AS_UNSET([am_rc]) + rm -f conftest-deps.mk +} +])# _AM_OUTPUT_DEPENDENCY_COMMANDS + + +# AM_OUTPUT_DEPENDENCY_COMMANDS +# ----------------------------- +# This macro should only be invoked once -- use via AC_REQUIRE. +# +# This code is only required when automatic dependency tracking is enabled. +# This creates each '.Po' and '.Plo' makefile fragment that we'll need in +# order to bootstrap the dependency handling code. +AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS], +[AC_CONFIG_COMMANDS([depfiles], + [test x"$AMDEP_TRUE" != x"" || _AM_OUTPUT_DEPENDENCY_COMMANDS], + [AMDEP_TRUE="$AMDEP_TRUE" MAKE="${MAKE-make}"])]) + +# Do all the work for Automake. 
-*- Autoconf -*- + +# Copyright (C) 1996-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This macro actually does too much. Some checks are only needed if +# your package does certain things. But this isn't really a big deal. + +dnl Redefine AC_PROG_CC to automatically invoke _AM_PROG_CC_C_O. +m4_define([AC_PROG_CC], +m4_defn([AC_PROG_CC]) +[_AM_PROG_CC_C_O +]) + +# AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE]) +# AM_INIT_AUTOMAKE([OPTIONS]) +# ----------------------------------------------- +# The call with PACKAGE and VERSION arguments is the old style +# call (pre autoconf-2.50), which is being phased out. PACKAGE +# and VERSION should now be passed to AC_INIT and removed from +# the call to AM_INIT_AUTOMAKE. +# We support both call styles for the transition. After +# the next Automake release, Autoconf can make the AC_INIT +# arguments mandatory, and then we can depend on a new Autoconf +# release and drop the old call support. +AC_DEFUN([AM_INIT_AUTOMAKE], +[AC_PREREQ([2.65])dnl +m4_ifdef([_$0_ALREADY_INIT], + [m4_fatal([$0 expanded multiple times +]m4_defn([_$0_ALREADY_INIT]))], + [m4_define([_$0_ALREADY_INIT], m4_expansion_stack)])dnl +dnl Autoconf wants to disallow AM_ names. We explicitly allow +dnl the ones we care about. +m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl +AC_REQUIRE([AM_SET_CURRENT_AUTOMAKE_VERSION])dnl +AC_REQUIRE([AC_PROG_INSTALL])dnl +if test "`cd $srcdir && pwd`" != "`pwd`"; then + # Use -I$(srcdir) only when $(srcdir) != ., so that make's output + # is not polluted with repeated "-I." + AC_SUBST([am__isrc], [' -I$(srcdir)'])_AM_SUBST_NOTMAKE([am__isrc])dnl + # test to see if srcdir already configured + if test -f $srcdir/config.status; then + AC_MSG_ERROR([source directory already configured; run "make distclean" there first]) + fi +fi + +# test whether we have cygpath +if test -z "$CYGPATH_W"; then + if (cygpath --version) >/dev/null 2>/dev/null; then + CYGPATH_W='cygpath -w' + else + CYGPATH_W=echo + fi +fi +AC_SUBST([CYGPATH_W]) + +# Define the identity of the package. +dnl Distinguish between old-style and new-style calls. +m4_ifval([$2], +[AC_DIAGNOSE([obsolete], + [$0: two- and three-arguments forms are deprecated.]) +m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl + AC_SUBST([PACKAGE], [$1])dnl + AC_SUBST([VERSION], [$2])], +[_AM_SET_OPTIONS([$1])dnl +dnl Diagnose old-style AC_INIT with new-style AM_AUTOMAKE_INIT. +m4_if( + m4_ifset([AC_PACKAGE_NAME], [ok]):m4_ifset([AC_PACKAGE_VERSION], [ok]), + [ok:ok],, + [m4_fatal([AC_INIT should be called with package and version arguments])])dnl + AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl + AC_SUBST([VERSION], ['AC_PACKAGE_VERSION'])])dnl + +_AM_IF_OPTION([no-define],, +[AC_DEFINE_UNQUOTED([PACKAGE], ["$PACKAGE"], [Name of package]) + AC_DEFINE_UNQUOTED([VERSION], ["$VERSION"], [Version number of package])])dnl + +# Some tools Automake needs. +AC_REQUIRE([AM_SANITY_CHECK])dnl +AC_REQUIRE([AC_ARG_PROGRAM])dnl +AM_MISSING_PROG([ACLOCAL], [aclocal-${am__api_version}]) +AM_MISSING_PROG([AUTOCONF], [autoconf]) +AM_MISSING_PROG([AUTOMAKE], [automake-${am__api_version}]) +AM_MISSING_PROG([AUTOHEADER], [autoheader]) +AM_MISSING_PROG([MAKEINFO], [makeinfo]) +AC_REQUIRE([AM_PROG_INSTALL_SH])dnl +AC_REQUIRE([AM_PROG_INSTALL_STRIP])dnl +AC_REQUIRE([AC_PROG_MKDIR_P])dnl +# For better backward compatibility. 
To be removed once Automake 1.9.x +# dies out for good. For more background, see: +# +# +AC_SUBST([mkdir_p], ['$(MKDIR_P)']) +# We need awk for the "check" target (and possibly the TAP driver). The +# system "awk" is bad on some platforms. +AC_REQUIRE([AC_PROG_AWK])dnl +AC_REQUIRE([AC_PROG_MAKE_SET])dnl +AC_REQUIRE([AM_SET_LEADING_DOT])dnl +_AM_IF_OPTION([tar-ustar], [_AM_PROG_TAR([ustar])], + [_AM_IF_OPTION([tar-pax], [_AM_PROG_TAR([pax])], + [_AM_PROG_TAR([v7])])]) +_AM_IF_OPTION([no-dependencies],, +[AC_PROVIDE_IFELSE([AC_PROG_CC], + [_AM_DEPENDENCIES([CC])], + [m4_define([AC_PROG_CC], + m4_defn([AC_PROG_CC])[_AM_DEPENDENCIES([CC])])])dnl +AC_PROVIDE_IFELSE([AC_PROG_CXX], + [_AM_DEPENDENCIES([CXX])], + [m4_define([AC_PROG_CXX], + m4_defn([AC_PROG_CXX])[_AM_DEPENDENCIES([CXX])])])dnl +AC_PROVIDE_IFELSE([AC_PROG_OBJC], + [_AM_DEPENDENCIES([OBJC])], + [m4_define([AC_PROG_OBJC], + m4_defn([AC_PROG_OBJC])[_AM_DEPENDENCIES([OBJC])])])dnl +AC_PROVIDE_IFELSE([AC_PROG_OBJCXX], + [_AM_DEPENDENCIES([OBJCXX])], + [m4_define([AC_PROG_OBJCXX], + m4_defn([AC_PROG_OBJCXX])[_AM_DEPENDENCIES([OBJCXX])])])dnl +]) +# Variables for tags utilities; see am/tags.am +if test -z "$CTAGS"; then + CTAGS=ctags +fi +AC_SUBST([CTAGS]) +if test -z "$ETAGS"; then + ETAGS=etags +fi +AC_SUBST([ETAGS]) +if test -z "$CSCOPE"; then + CSCOPE=cscope +fi +AC_SUBST([CSCOPE]) + +AC_REQUIRE([AM_SILENT_RULES])dnl +dnl The testsuite driver may need to know about EXEEXT, so add the +dnl 'am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen. This +dnl macro is hooked onto _AC_COMPILER_EXEEXT early, see below. +AC_CONFIG_COMMANDS_PRE(dnl +[m4_provide_if([_AM_COMPILER_EXEEXT], + [AM_CONDITIONAL([am__EXEEXT], [test -n "$EXEEXT"])])])dnl + +# POSIX will say in a future version that running "rm -f" with no argument +# is OK; and we want to be able to make that assumption in our Makefile +# recipes. So use an aggressive probe to check that the usage we want is +# actually supported "in the wild" to an acceptable degree. +# See automake bug#10828. +# To make any issue more visible, cause the running configure to be aborted +# by default if the 'rm' program in use doesn't match our expectations; the +# user can still override this though. +if rm -f && rm -fr && rm -rf; then : OK; else + cat >&2 <<'END' +Oops! + +Your 'rm' program seems unable to run without file operands specified +on the command line, even when the '-f' option is present. This is contrary +to the behaviour of most rm programs out there, and not conforming with +the upcoming POSIX standard: + +Please tell bug-automake@gnu.org about your system, including the value +of your $PATH and any error possibly output before this message. This +can help us improve future automake versions. + +END + if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then + echo 'Configuration will proceed anyway, since you have set the' >&2 + echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2 + echo >&2 + else + cat >&2 <<'END' +Aborting the configuration process, to ensure you take notice of the issue. + +You can download and install GNU coreutils to get an 'rm' implementation +that behaves properly: . + +If you want to complete the configuration process using your problematic +'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM +to "yes", and re-run configure. 
+ +END + AC_MSG_ERROR([Your 'rm' program is bad, sorry.]) + fi +fi +dnl The trailing newline in this macro's definition is deliberate, for +dnl backward compatibility and to allow trailing 'dnl'-style comments +dnl after the AM_INIT_AUTOMAKE invocation. See automake bug#16841. +]) + +dnl Hook into '_AC_COMPILER_EXEEXT' early to learn its expansion. Do not +dnl add the conditional right here, as _AC_COMPILER_EXEEXT may be further +dnl mangled by Autoconf and run in a shell conditional statement. +m4_define([_AC_COMPILER_EXEEXT], +m4_defn([_AC_COMPILER_EXEEXT])[m4_provide([_AM_COMPILER_EXEEXT])]) + +# When config.status generates a header, we must update the stamp-h file. +# This file resides in the same directory as the config header +# that is generated. The stamp files are numbered to have different names. + +# Autoconf calls _AC_AM_CONFIG_HEADER_HOOK (when defined) in the +# loop where config.status creates the headers, so we can generate +# our stamp files there. +AC_DEFUN([_AC_AM_CONFIG_HEADER_HOOK], +[# Compute $1's index in $config_headers. +_am_arg=$1 +_am_stamp_count=1 +for _am_header in $config_headers :; do + case $_am_header in + $_am_arg | $_am_arg:* ) + break ;; + * ) + _am_stamp_count=`expr $_am_stamp_count + 1` ;; + esac +done +echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count]) + +# Copyright (C) 2001-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_PROG_INSTALL_SH +# ------------------ +# Define $install_sh. +AC_DEFUN([AM_PROG_INSTALL_SH], +[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl +if test x"${install_sh+set}" != xset; then + case $am_aux_dir in + *\ * | *\ *) + install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; + *) + install_sh="\${SHELL} $am_aux_dir/install-sh" + esac +fi +AC_SUBST([install_sh])]) + +# Copyright (C) 2003-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# Check whether the underlying file-system supports filenames +# with a leading dot. For instance MS-DOS doesn't. +AC_DEFUN([AM_SET_LEADING_DOT], +[rm -rf .tst 2>/dev/null +mkdir .tst 2>/dev/null +if test -d .tst; then + am__leading_dot=. +else + am__leading_dot=_ +fi +rmdir .tst 2>/dev/null +AC_SUBST([am__leading_dot])]) + +# Check to see how 'make' treats includes. -*- Autoconf -*- + +# Copyright (C) 2001-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_MAKE_INCLUDE() +# ----------------- +# Check whether make has an 'include' directive that can support all +# the idioms we need for our automatic dependency tracking code. +AC_DEFUN([AM_MAKE_INCLUDE], +[AC_MSG_CHECKING([whether ${MAKE-make} supports the include directive]) +cat > confinc.mk << 'END' +am__doit: + @echo this is the am__doit target >confinc.out +.PHONY: am__doit +END +am__include="#" +am__quote= +# BSD make does it like this. +echo '.include "confinc.mk" # ignored' > confmf.BSD +# Other make implementations (GNU, Solaris 10, AIX) do it like this. 
+echo 'include confinc.mk # ignored' > confmf.GNU +_am_result=no +for s in GNU BSD; do + AM_RUN_LOG([${MAKE-make} -f confmf.$s && cat confinc.out]) + AS_CASE([$?:`cat confinc.out 2>/dev/null`], + ['0:this is the am__doit target'], + [AS_CASE([$s], + [BSD], [am__include='.include' am__quote='"'], + [am__include='include' am__quote=''])]) + if test "$am__include" != "#"; then + _am_result="yes ($s style)" + break + fi +done +rm -f confinc.* confmf.* +AC_MSG_RESULT([${_am_result}]) +AC_SUBST([am__include])]) +AC_SUBST([am__quote])]) + +# Fake the existence of programs that GNU maintainers use. -*- Autoconf -*- + +# Copyright (C) 1997-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_MISSING_PROG(NAME, PROGRAM) +# ------------------------------ +AC_DEFUN([AM_MISSING_PROG], +[AC_REQUIRE([AM_MISSING_HAS_RUN]) +$1=${$1-"${am_missing_run}$2"} +AC_SUBST($1)]) + +# AM_MISSING_HAS_RUN +# ------------------ +# Define MISSING if not defined so far and test if it is modern enough. +# If it is, set am_missing_run to use it, otherwise, to nothing. +AC_DEFUN([AM_MISSING_HAS_RUN], +[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl +AC_REQUIRE_AUX_FILE([missing])dnl +if test x"${MISSING+set}" != xset; then + MISSING="\${SHELL} '$am_aux_dir/missing'" +fi +# Use eval to expand $SHELL +if eval "$MISSING --is-lightweight"; then + am_missing_run="$MISSING " +else + am_missing_run= + AC_MSG_WARN(['missing' script is too old or missing]) +fi +]) + +# Helper functions for option handling. -*- Autoconf -*- + +# Copyright (C) 2001-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# _AM_MANGLE_OPTION(NAME) +# ----------------------- +AC_DEFUN([_AM_MANGLE_OPTION], +[[_AM_OPTION_]m4_bpatsubst($1, [[^a-zA-Z0-9_]], [_])]) + +# _AM_SET_OPTION(NAME) +# -------------------- +# Set option NAME. Presently that only means defining a flag for this option. +AC_DEFUN([_AM_SET_OPTION], +[m4_define(_AM_MANGLE_OPTION([$1]), [1])]) + +# _AM_SET_OPTIONS(OPTIONS) +# ------------------------ +# OPTIONS is a space-separated list of Automake options. +AC_DEFUN([_AM_SET_OPTIONS], +[m4_foreach_w([_AM_Option], [$1], [_AM_SET_OPTION(_AM_Option)])]) + +# _AM_IF_OPTION(OPTION, IF-SET, [IF-NOT-SET]) +# ------------------------------------------- +# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. +AC_DEFUN([_AM_IF_OPTION], +[m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])]) + +# Copyright (C) 1999-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# _AM_PROG_CC_C_O +# --------------- +# Like AC_PROG_CC_C_O, but changed for automake. We rewrite AC_PROG_CC +# to automatically call this. +AC_DEFUN([_AM_PROG_CC_C_O], +[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl +AC_REQUIRE_AUX_FILE([compile])dnl +AC_LANG_PUSH([C])dnl +AC_CACHE_CHECK( + [whether $CC understands -c and -o together], + [am_cv_prog_cc_c_o], + [AC_LANG_CONFTEST([AC_LANG_PROGRAM([])]) + # Make sure it works both with $CC and with simple cc. 
+ # Following AC_PROG_CC_C_O, we do the test twice because some + # compilers refuse to overwrite an existing .o file with -o, + # though they will create one. + am_cv_prog_cc_c_o=yes + for am_i in 1 2; do + if AM_RUN_LOG([$CC -c conftest.$ac_ext -o conftest2.$ac_objext]) \ + && test -f conftest2.$ac_objext; then + : OK + else + am_cv_prog_cc_c_o=no + break + fi + done + rm -f core conftest* + unset am_i]) +if test "$am_cv_prog_cc_c_o" != yes; then + # Losing compiler, so override with the script. + # FIXME: It is wrong to rewrite CC. + # But if we don't then we get into trouble of one sort or another. + # A longer-term fix would be to have automake use am__CC in this case, + # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)" + CC="$am_aux_dir/compile $CC" +fi +AC_LANG_POP([C])]) + +# For backward compatibility. +AC_DEFUN_ONCE([AM_PROG_CC_C_O], [AC_REQUIRE([AC_PROG_CC])]) + +# Copyright (C) 2001-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_RUN_LOG(COMMAND) +# ------------------- +# Run COMMAND, save the exit status in ac_status, and log it. +# (This has been adapted from Autoconf's _AC_RUN_LOG macro.) +AC_DEFUN([AM_RUN_LOG], +[{ echo "$as_me:$LINENO: $1" >&AS_MESSAGE_LOG_FD + ($1) >&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD + (exit $ac_status); }]) + +# Check to make sure that the build environment is sane. -*- Autoconf -*- + +# Copyright (C) 1996-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_SANITY_CHECK +# --------------- +AC_DEFUN([AM_SANITY_CHECK], +[AC_MSG_CHECKING([whether build environment is sane]) +# Reject unsafe characters in $srcdir or the absolute working directory +# name. Accept space and tab only in the latter. +am_lf=' +' +case `pwd` in + *[[\\\"\#\$\&\'\`$am_lf]]*) + AC_MSG_ERROR([unsafe absolute working directory name]);; +esac +case $srcdir in + *[[\\\"\#\$\&\'\`$am_lf\ \ ]]*) + AC_MSG_ERROR([unsafe srcdir value: '$srcdir']);; +esac + +# Do 'set' in a subshell so we don't clobber the current shell's +# arguments. Must try -L first in case configure is actually a +# symlink; some systems play weird games with the mod time of symlinks +# (eg FreeBSD returns the mod time of the symlink's containing +# directory). +if ( + am_has_slept=no + for am_try in 1 2; do + echo "timestamp, slept: $am_has_slept" > conftest.file + set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` + if test "$[*]" = "X"; then + # -L didn't work. + set X `ls -t "$srcdir/configure" conftest.file` + fi + if test "$[*]" != "X $srcdir/configure conftest.file" \ + && test "$[*]" != "X conftest.file $srcdir/configure"; then + + # If neither matched, then we have a broken ls. This can happen + # if, for instance, CONFIG_SHELL is bash and it inherits a + # broken ls alias from the environment. This has actually + # happened. Such a system could not be considered "sane". + AC_MSG_ERROR([ls -t appears to fail. Make sure there is not a broken + alias in your environment]) + fi + if test "$[2]" = conftest.file || test $am_try -eq 2; then + break + fi + # Just in case. 
+ sleep 1 + am_has_slept=yes + done + test "$[2]" = conftest.file + ) +then + # Ok. + : +else + AC_MSG_ERROR([newly created file is older than distributed files! +Check your system clock]) +fi +AC_MSG_RESULT([yes]) +# If we didn't sleep, we still need to ensure time stamps of config.status and +# generated files are strictly newer. +am_sleep_pid= +if grep 'slept: no' conftest.file >/dev/null 2>&1; then + ( sleep 1 ) & + am_sleep_pid=$! +fi +AC_CONFIG_COMMANDS_PRE( + [AC_MSG_CHECKING([that generated files are newer than configure]) + if test -n "$am_sleep_pid"; then + # Hide warnings about reused PIDs. + wait $am_sleep_pid 2>/dev/null + fi + AC_MSG_RESULT([done])]) +rm -f conftest.file +]) + +# Copyright (C) 2009-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_SILENT_RULES([DEFAULT]) +# -------------------------- +# Enable less verbose build rules; with the default set to DEFAULT +# ("yes" being less verbose, "no" or empty being verbose). +AC_DEFUN([AM_SILENT_RULES], +[AC_ARG_ENABLE([silent-rules], [dnl +AS_HELP_STRING( + [--enable-silent-rules], + [less verbose build output (undo: "make V=1")]) +AS_HELP_STRING( + [--disable-silent-rules], + [verbose build output (undo: "make V=0")])dnl +]) +case $enable_silent_rules in @%:@ ((( + yes) AM_DEFAULT_VERBOSITY=0;; + no) AM_DEFAULT_VERBOSITY=1;; + *) AM_DEFAULT_VERBOSITY=m4_if([$1], [yes], [0], [1]);; +esac +dnl +dnl A few 'make' implementations (e.g., NonStop OS and NextStep) +dnl do not support nested variable expansions. +dnl See automake bug#9928 and bug#10237. +am_make=${MAKE-make} +AC_CACHE_CHECK([whether $am_make supports nested variables], + [am_cv_make_support_nested_variables], + [if AS_ECHO([['TRUE=$(BAR$(V)) +BAR0=false +BAR1=true +V=1 +am__doit: + @$(TRUE) +.PHONY: am__doit']]) | $am_make -f - >/dev/null 2>&1; then + am_cv_make_support_nested_variables=yes +else + am_cv_make_support_nested_variables=no +fi]) +if test $am_cv_make_support_nested_variables = yes; then + dnl Using '$V' instead of '$(V)' breaks IRIX make. + AM_V='$(V)' + AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' +else + AM_V=$AM_DEFAULT_VERBOSITY + AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY +fi +AC_SUBST([AM_V])dnl +AM_SUBST_NOTMAKE([AM_V])dnl +AC_SUBST([AM_DEFAULT_V])dnl +AM_SUBST_NOTMAKE([AM_DEFAULT_V])dnl +AC_SUBST([AM_DEFAULT_VERBOSITY])dnl +AM_BACKSLASH='\' +AC_SUBST([AM_BACKSLASH])dnl +_AM_SUBST_NOTMAKE([AM_BACKSLASH])dnl +]) + +# Copyright (C) 2001-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_PROG_INSTALL_STRIP +# --------------------- +# One issue with vendor 'install' (even GNU) is that you can't +# specify the program used to strip binaries. This is especially +# annoying in cross-compiling environments, where the build's strip +# is unlikely to handle the host's binaries. +# Fortunately install-sh will honor a STRIPPROG variable, so we +# always use install-sh in "make install-strip", and initialize +# STRIPPROG with the value of the STRIP variable (set by the user). +AC_DEFUN([AM_PROG_INSTALL_STRIP], +[AC_REQUIRE([AM_PROG_INSTALL_SH])dnl +# Installed binaries are usually stripped using 'strip' when the user +# run "make install-strip". 
However 'strip' might not be the right +# tool to use in cross-compilation environments, therefore Automake +# will honor the 'STRIP' environment variable to overrule this program. +dnl Don't test for $cross_compiling = yes, because it might be 'maybe'. +if test "$cross_compiling" != no; then + AC_CHECK_TOOL([STRIP], [strip], :) +fi +INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" +AC_SUBST([INSTALL_STRIP_PROGRAM])]) + +# Copyright (C) 2006-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# _AM_SUBST_NOTMAKE(VARIABLE) +# --------------------------- +# Prevent Automake from outputting VARIABLE = @VARIABLE@ in Makefile.in. +# This macro is traced by Automake. +AC_DEFUN([_AM_SUBST_NOTMAKE]) + +# AM_SUBST_NOTMAKE(VARIABLE) +# -------------------------- +# Public sister of _AM_SUBST_NOTMAKE. +AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)]) + +# Check how to create a tarball. -*- Autoconf -*- + +# Copyright (C) 2004-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# _AM_PROG_TAR(FORMAT) +# -------------------- +# Check how to create a tarball in format FORMAT. +# FORMAT should be one of 'v7', 'ustar', or 'pax'. +# +# Substitute a variable $(am__tar) that is a command +# writing to stdout a FORMAT-tarball containing the directory +# $tardir. +# tardir=directory && $(am__tar) > result.tar +# +# Substitute a variable $(am__untar) that extract such +# a tarball read from stdin. +# $(am__untar) < result.tar +# +AC_DEFUN([_AM_PROG_TAR], +[# Always define AMTAR for backward compatibility. Yes, it's still used +# in the wild :-( We should find a proper way to deprecate it ... +AC_SUBST([AMTAR], ['$${TAR-tar}']) + +# We'll loop over all known methods to create a tar archive until one works. +_am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none' + +m4_if([$1], [v7], + [am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'], + + [m4_case([$1], + [ustar], + [# The POSIX 1988 'ustar' format is defined with fixed-size fields. + # There is notably a 21 bits limit for the UID and the GID. In fact, + # the 'pax' utility can hang on bigger UID/GID (see automake bug#8343 + # and bug#13588). + am_max_uid=2097151 # 2^21 - 1 + am_max_gid=$am_max_uid + # The $UID and $GID variables are not portable, so we need to resort + # to the POSIX-mandated id(1) utility. Errors in the 'id' calls + # below are definitely unexpected, so allow the users to see them + # (that is, avoid stderr redirection). + am_uid=`id -u || echo unknown` + am_gid=`id -g || echo unknown` + AC_MSG_CHECKING([whether UID '$am_uid' is supported by ustar format]) + if test $am_uid -le $am_max_uid; then + AC_MSG_RESULT([yes]) + else + AC_MSG_RESULT([no]) + _am_tools=none + fi + AC_MSG_CHECKING([whether GID '$am_gid' is supported by ustar format]) + if test $am_gid -le $am_max_gid; then + AC_MSG_RESULT([yes]) + else + AC_MSG_RESULT([no]) + _am_tools=none + fi], + + [pax], + [], + + [m4_fatal([Unknown tar format])]) + + AC_MSG_CHECKING([how to create a $1 tar archive]) + + # Go ahead even if we have the value already cached. We do so because we + # need to set the values for the 'am__tar' and 'am__untar' variables. 
+ _am_tools=${am_cv_prog_tar_$1-$_am_tools} + + for _am_tool in $_am_tools; do + case $_am_tool in + gnutar) + for _am_tar in tar gnutar gtar; do + AM_RUN_LOG([$_am_tar --version]) && break + done + am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"' + am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"' + am__untar="$_am_tar -xf -" + ;; + plaintar) + # Must skip GNU tar: if it does not support --format= it doesn't create + # ustar tarball either. + (tar --version) >/dev/null 2>&1 && continue + am__tar='tar chf - "$$tardir"' + am__tar_='tar chf - "$tardir"' + am__untar='tar xf -' + ;; + pax) + am__tar='pax -L -x $1 -w "$$tardir"' + am__tar_='pax -L -x $1 -w "$tardir"' + am__untar='pax -r' + ;; + cpio) + am__tar='find "$$tardir" -print | cpio -o -H $1 -L' + am__tar_='find "$tardir" -print | cpio -o -H $1 -L' + am__untar='cpio -i -H $1 -d' + ;; + none) + am__tar=false + am__tar_=false + am__untar=false + ;; + esac + + # If the value was cached, stop now. We just wanted to have am__tar + # and am__untar set. + test -n "${am_cv_prog_tar_$1}" && break + + # tar/untar a dummy directory, and stop if the command works. + rm -rf conftest.dir + mkdir conftest.dir + echo GrepMe > conftest.dir/file + AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar]) + rm -rf conftest.dir + if test -s conftest.tar; then + AM_RUN_LOG([$am__untar /dev/null 2>&1 && break + fi + done + rm -rf conftest.dir + + AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool]) + AC_MSG_RESULT([$am_cv_prog_tar_$1])]) + +AC_SUBST([am__tar]) +AC_SUBST([am__untar]) +]) # _AM_PROG_TAR + +m4_include([m4/libtool.m4]) +m4_include([m4/ltoptions.m4]) +m4_include([m4/ltsugar.m4]) +m4_include([m4/ltversion.m4]) +m4_include([m4/lt~obsolete.m4]) diff --git a/pkg/enet/enet/callbacks.c b/pkg/enet/enet/callbacks.c new file mode 100644 index 000000000..b3990af1f --- /dev/null +++ b/pkg/enet/enet/callbacks.c @@ -0,0 +1,53 @@ +/** + @file callbacks.c + @brief ENet callback functions +*/ +#define ENET_BUILDING_LIB 1 +#include "enet/enet.h" + +static ENetCallbacks callbacks = { malloc, free, abort }; + +int +enet_initialize_with_callbacks (ENetVersion version, const ENetCallbacks * inits) +{ + if (version < ENET_VERSION_CREATE (1, 3, 0)) + return -1; + + if (inits -> malloc != NULL || inits -> free != NULL) + { + if (inits -> malloc == NULL || inits -> free == NULL) + return -1; + + callbacks.malloc = inits -> malloc; + callbacks.free = inits -> free; + } + + if (inits -> no_memory != NULL) + callbacks.no_memory = inits -> no_memory; + + return enet_initialize (); +} + +ENetVersion +enet_linked_version (void) +{ + return ENET_VERSION; +} + +void * +enet_malloc (size_t size) +{ + void * memory = callbacks.malloc (size); + + if (memory == NULL) + callbacks.no_memory (); + + return memory; +} + +void +enet_free (void * memory) +{ + callbacks.free (memory); +} + diff --git a/pkg/enet/enet/compile b/pkg/enet/enet/compile new file mode 100755 index 000000000..df363c8fb --- /dev/null +++ b/pkg/enet/enet/compile @@ -0,0 +1,348 @@ +#! /bin/sh +# Wrapper for compilers which do not understand '-c -o'. + +scriptversion=2018-03-07.03; # UTC + +# Copyright (C) 1999-2021 Free Software Foundation, Inc. +# Written by Tom Tromey . +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# This file is maintained in Automake, please report +# bugs to or send patches to +# . + +nl=' +' + +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent tools from complaining about whitespace usage. +IFS=" "" $nl" + +file_conv= + +# func_file_conv build_file lazy +# Convert a $build file to $host form and store it in $file +# Currently only supports Windows hosts. If the determined conversion +# type is listed in (the comma separated) LAZY, no conversion will +# take place. +func_file_conv () +{ + file=$1 + case $file in + / | /[!/]*) # absolute file, and not a UNC file + if test -z "$file_conv"; then + # lazily determine how to convert abs files + case `uname -s` in + MINGW*) + file_conv=mingw + ;; + CYGWIN* | MSYS*) + file_conv=cygwin + ;; + *) + file_conv=wine + ;; + esac + fi + case $file_conv/,$2, in + *,$file_conv,*) + ;; + mingw/*) + file=`cmd //C echo "$file " | sed -e 's/"\(.*\) " *$/\1/'` + ;; + cygwin/* | msys/*) + file=`cygpath -m "$file" || echo "$file"` + ;; + wine/*) + file=`winepath -w "$file" || echo "$file"` + ;; + esac + ;; + esac +} + +# func_cl_dashL linkdir +# Make cl look for libraries in LINKDIR +func_cl_dashL () +{ + func_file_conv "$1" + if test -z "$lib_path"; then + lib_path=$file + else + lib_path="$lib_path;$file" + fi + linker_opts="$linker_opts -LIBPATH:$file" +} + +# func_cl_dashl library +# Do a library search-path lookup for cl +func_cl_dashl () +{ + lib=$1 + found=no + save_IFS=$IFS + IFS=';' + for dir in $lib_path $LIB + do + IFS=$save_IFS + if $shared && test -f "$dir/$lib.dll.lib"; then + found=yes + lib=$dir/$lib.dll.lib + break + fi + if test -f "$dir/$lib.lib"; then + found=yes + lib=$dir/$lib.lib + break + fi + if test -f "$dir/lib$lib.a"; then + found=yes + lib=$dir/lib$lib.a + break + fi + done + IFS=$save_IFS + + if test "$found" != yes; then + lib=$lib.lib + fi +} + +# func_cl_wrapper cl arg... +# Adjust compile command to suit cl +func_cl_wrapper () +{ + # Assume a capable shell + lib_path= + shared=: + linker_opts= + for arg + do + if test -n "$eat"; then + eat= + else + case $1 in + -o) + # configure might choose to run compile as 'compile cc -o foo foo.c'. 
+ eat=1 + case $2 in + *.o | *.[oO][bB][jJ]) + func_file_conv "$2" + set x "$@" -Fo"$file" + shift + ;; + *) + func_file_conv "$2" + set x "$@" -Fe"$file" + shift + ;; + esac + ;; + -I) + eat=1 + func_file_conv "$2" mingw + set x "$@" -I"$file" + shift + ;; + -I*) + func_file_conv "${1#-I}" mingw + set x "$@" -I"$file" + shift + ;; + -l) + eat=1 + func_cl_dashl "$2" + set x "$@" "$lib" + shift + ;; + -l*) + func_cl_dashl "${1#-l}" + set x "$@" "$lib" + shift + ;; + -L) + eat=1 + func_cl_dashL "$2" + ;; + -L*) + func_cl_dashL "${1#-L}" + ;; + -static) + shared=false + ;; + -Wl,*) + arg=${1#-Wl,} + save_ifs="$IFS"; IFS=',' + for flag in $arg; do + IFS="$save_ifs" + linker_opts="$linker_opts $flag" + done + IFS="$save_ifs" + ;; + -Xlinker) + eat=1 + linker_opts="$linker_opts $2" + ;; + -*) + set x "$@" "$1" + shift + ;; + *.cc | *.CC | *.cxx | *.CXX | *.[cC]++) + func_file_conv "$1" + set x "$@" -Tp"$file" + shift + ;; + *.c | *.cpp | *.CPP | *.lib | *.LIB | *.Lib | *.OBJ | *.obj | *.[oO]) + func_file_conv "$1" mingw + set x "$@" "$file" + shift + ;; + *) + set x "$@" "$1" + shift + ;; + esac + fi + shift + done + if test -n "$linker_opts"; then + linker_opts="-link$linker_opts" + fi + exec "$@" $linker_opts + exit 1 +} + +eat= + +case $1 in + '') + echo "$0: No command. Try '$0 --help' for more information." 1>&2 + exit 1; + ;; + -h | --h*) + cat <<\EOF +Usage: compile [--help] [--version] PROGRAM [ARGS] + +Wrapper for compilers which do not understand '-c -o'. +Remove '-o dest.o' from ARGS, run PROGRAM with the remaining +arguments, and rename the output as expected. + +If you are trying to build a whole package this is not the +right script to run: please start by reading the file 'INSTALL'. + +Report bugs to . +EOF + exit $? + ;; + -v | --v*) + echo "compile $scriptversion" + exit $? + ;; + cl | *[/\\]cl | cl.exe | *[/\\]cl.exe | \ + icl | *[/\\]icl | icl.exe | *[/\\]icl.exe ) + func_cl_wrapper "$@" # Doesn't return... + ;; +esac + +ofile= +cfile= + +for arg +do + if test -n "$eat"; then + eat= + else + case $1 in + -o) + # configure might choose to run compile as 'compile cc -o foo foo.c'. + # So we strip '-o arg' only if arg is an object. + eat=1 + case $2 in + *.o | *.obj) + ofile=$2 + ;; + *) + set x "$@" -o "$2" + shift + ;; + esac + ;; + *.c) + cfile=$1 + set x "$@" "$1" + shift + ;; + *) + set x "$@" "$1" + shift + ;; + esac + fi + shift +done + +if test -z "$ofile" || test -z "$cfile"; then + # If no '-o' option was seen then we might have been invoked from a + # pattern rule where we don't need one. That is ok -- this is a + # normal compilation that the losing compiler can handle. If no + # '.c' file was seen then we are probably linking. That is also + # ok. + exec "$@" +fi + +# Name of file we expect compiler to create. +cofile=`echo "$cfile" | sed 's|^.*[\\/]||; s|^[a-zA-Z]:||; s/\.c$/.o/'` + +# Create the lock directory. +# Note: use '[/\\:.-]' here to ensure that we don't use the same name +# that we are using for the .o file. Also, base the name on the expected +# object file name, since that is what matters with a parallel build. +lockdir=`echo "$cofile" | sed -e 's|[/\\:.-]|_|g'`.d +while true; do + if mkdir "$lockdir" >/dev/null 2>&1; then + break + fi + sleep 1 +done +# FIXME: race condition here if user kills between mkdir and trap. +trap "rmdir '$lockdir'; exit 1" 1 2 15 + +# Run the compile. +"$@" +ret=$? 
+ +if test -f "$cofile"; then + test "$cofile" = "$ofile" || mv "$cofile" "$ofile" +elif test -f "${cofile}bj"; then + test "${cofile}bj" = "$ofile" || mv "${cofile}bj" "$ofile" +fi + +rmdir "$lockdir" +exit $ret + +# Local Variables: +# mode: shell-script +# sh-indentation: 2 +# eval: (add-hook 'before-save-hook 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC0" +# time-stamp-end: "; # UTC" +# End: diff --git a/pkg/enet/enet/compress.c b/pkg/enet/enet/compress.c new file mode 100644 index 000000000..784489a78 --- /dev/null +++ b/pkg/enet/enet/compress.c @@ -0,0 +1,654 @@ +/** + @file compress.c + @brief An adaptive order-2 PPM range coder +*/ +#define ENET_BUILDING_LIB 1 +#include +#include "enet/enet.h" + +typedef struct _ENetSymbol +{ + /* binary indexed tree of symbols */ + enet_uint8 value; + enet_uint8 count; + enet_uint16 under; + enet_uint16 left, right; + + /* context defined by this symbol */ + enet_uint16 symbols; + enet_uint16 escapes; + enet_uint16 total; + enet_uint16 parent; +} ENetSymbol; + +/* adaptation constants tuned aggressively for small packet sizes rather than large file compression */ +enum +{ + ENET_RANGE_CODER_TOP = 1<<24, + ENET_RANGE_CODER_BOTTOM = 1<<16, + + ENET_CONTEXT_SYMBOL_DELTA = 3, + ENET_CONTEXT_SYMBOL_MINIMUM = 1, + ENET_CONTEXT_ESCAPE_MINIMUM = 1, + + ENET_SUBCONTEXT_ORDER = 2, + ENET_SUBCONTEXT_SYMBOL_DELTA = 2, + ENET_SUBCONTEXT_ESCAPE_DELTA = 5 +}; + +/* context exclusion roughly halves compression speed, so disable for now */ +#undef ENET_CONTEXT_EXCLUSION + +typedef struct _ENetRangeCoder +{ + /* only allocate enough symbols for reasonable MTUs, would need to be larger for large file compression */ + ENetSymbol symbols[4096]; +} ENetRangeCoder; + +void * +enet_range_coder_create (void) +{ + ENetRangeCoder * rangeCoder = (ENetRangeCoder *) enet_malloc (sizeof (ENetRangeCoder)); + if (rangeCoder == NULL) + return NULL; + + return rangeCoder; +} + +void +enet_range_coder_destroy (void * context) +{ + ENetRangeCoder * rangeCoder = (ENetRangeCoder *) context; + if (rangeCoder == NULL) + return; + + enet_free (rangeCoder); +} + +#define ENET_SYMBOL_CREATE(symbol, value_, count_) \ +{ \ + symbol = & rangeCoder -> symbols [nextSymbol ++]; \ + symbol -> value = value_; \ + symbol -> count = count_; \ + symbol -> under = count_; \ + symbol -> left = 0; \ + symbol -> right = 0; \ + symbol -> symbols = 0; \ + symbol -> escapes = 0; \ + symbol -> total = 0; \ + symbol -> parent = 0; \ +} + +#define ENET_CONTEXT_CREATE(context, escapes_, minimum) \ +{ \ + ENET_SYMBOL_CREATE (context, 0, 0); \ + (context) -> escapes = escapes_; \ + (context) -> total = escapes_ + 256*minimum; \ + (context) -> symbols = 0; \ +} + +static enet_uint16 +enet_symbol_rescale (ENetSymbol * symbol) +{ + enet_uint16 total = 0; + for (;;) + { + symbol -> count -= symbol->count >> 1; + symbol -> under = symbol -> count; + if (symbol -> left) + symbol -> under += enet_symbol_rescale (symbol + symbol -> left); + total += symbol -> under; + if (! symbol -> right) break; + symbol += symbol -> right; + } + return total; +} + +#define ENET_CONTEXT_RESCALE(context, minimum) \ +{ \ + (context) -> total = (context) -> symbols ? 
enet_symbol_rescale ((context) + (context) -> symbols) : 0; \ + (context) -> escapes -= (context) -> escapes >> 1; \ + (context) -> total += (context) -> escapes + 256*minimum; \ +} + +#define ENET_RANGE_CODER_OUTPUT(value) \ +{ \ + if (outData >= outEnd) \ + return 0; \ + * outData ++ = value; \ +} + +#define ENET_RANGE_CODER_ENCODE(under, count, total) \ +{ \ + encodeRange /= (total); \ + encodeLow += (under) * encodeRange; \ + encodeRange *= (count); \ + for (;;) \ + { \ + if((encodeLow ^ (encodeLow + encodeRange)) >= ENET_RANGE_CODER_TOP) \ + { \ + if(encodeRange >= ENET_RANGE_CODER_BOTTOM) break; \ + encodeRange = -encodeLow & (ENET_RANGE_CODER_BOTTOM - 1); \ + } \ + ENET_RANGE_CODER_OUTPUT (encodeLow >> 24); \ + encodeRange <<= 8; \ + encodeLow <<= 8; \ + } \ +} + +#define ENET_RANGE_CODER_FLUSH \ +{ \ + while (encodeLow) \ + { \ + ENET_RANGE_CODER_OUTPUT (encodeLow >> 24); \ + encodeLow <<= 8; \ + } \ +} + +#define ENET_RANGE_CODER_FREE_SYMBOLS \ +{ \ + if (nextSymbol >= sizeof (rangeCoder -> symbols) / sizeof (ENetSymbol) - ENET_SUBCONTEXT_ORDER ) \ + { \ + nextSymbol = 0; \ + ENET_CONTEXT_CREATE (root, ENET_CONTEXT_ESCAPE_MINIMUM, ENET_CONTEXT_SYMBOL_MINIMUM); \ + predicted = 0; \ + order = 0; \ + } \ +} + +#define ENET_CONTEXT_ENCODE(context, symbol_, value_, under_, count_, update, minimum) \ +{ \ + under_ = value*minimum; \ + count_ = minimum; \ + if (! (context) -> symbols) \ + { \ + ENET_SYMBOL_CREATE (symbol_, value_, update); \ + (context) -> symbols = symbol_ - (context); \ + } \ + else \ + { \ + ENetSymbol * node = (context) + (context) -> symbols; \ + for (;;) \ + { \ + if (value_ < node -> value) \ + { \ + node -> under += update; \ + if (node -> left) { node += node -> left; continue; } \ + ENET_SYMBOL_CREATE (symbol_, value_, update); \ + node -> left = symbol_ - node; \ + } \ + else \ + if (value_ > node -> value) \ + { \ + under_ += node -> under; \ + if (node -> right) { node += node -> right; continue; } \ + ENET_SYMBOL_CREATE (symbol_, value_, update); \ + node -> right = symbol_ - node; \ + } \ + else \ + { \ + count_ += node -> count; \ + under_ += node -> under - node -> count; \ + node -> under += update; \ + node -> count += update; \ + symbol_ = node; \ + } \ + break; \ + } \ + } \ +} + +#ifdef ENET_CONTEXT_EXCLUSION +static const ENetSymbol emptyContext = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + +#define ENET_CONTEXT_WALK(context, body) \ +{ \ + const ENetSymbol * node = (context) + (context) -> symbols; \ + const ENetSymbol * stack [256]; \ + size_t stackSize = 0; \ + while (node -> left) \ + { \ + stack [stackSize ++] = node; \ + node += node -> left; \ + } \ + for (;;) \ + { \ + body; \ + if (node -> right) \ + { \ + node += node -> right; \ + while (node -> left) \ + { \ + stack [stackSize ++] = node; \ + node += node -> left; \ + } \ + } \ + else \ + if (stackSize <= 0) \ + break; \ + else \ + node = stack [-- stackSize]; \ + } \ +} + +#define ENET_CONTEXT_ENCODE_EXCLUDE(context, value_, under, total, minimum) \ +ENET_CONTEXT_WALK(context, { \ + if (node -> value != value_) \ + { \ + enet_uint16 parentCount = rangeCoder -> symbols [node -> parent].count + minimum; \ + if (node -> value < value_) \ + under -= parentCount; \ + total -= parentCount; \ + } \ +}) +#endif + +size_t +enet_range_coder_compress (void * context, const ENetBuffer * inBuffers, size_t inBufferCount, size_t inLimit, enet_uint8 * outData, size_t outLimit) +{ + ENetRangeCoder * rangeCoder = (ENetRangeCoder *) context; + enet_uint8 * outStart = outData, * outEnd = & outData [outLimit]; + 
const enet_uint8 * inData, * inEnd; + enet_uint32 encodeLow = 0, encodeRange = ~0; + ENetSymbol * root; + enet_uint16 predicted = 0; + size_t order = 0, nextSymbol = 0; + + if (rangeCoder == NULL || inBufferCount <= 0 || inLimit <= 0) + return 0; + + inData = (const enet_uint8 *) inBuffers -> data; + inEnd = & inData [inBuffers -> dataLength]; + inBuffers ++; + inBufferCount --; + + ENET_CONTEXT_CREATE (root, ENET_CONTEXT_ESCAPE_MINIMUM, ENET_CONTEXT_SYMBOL_MINIMUM); + + for (;;) + { + ENetSymbol * subcontext, * symbol; +#ifdef ENET_CONTEXT_EXCLUSION + const ENetSymbol * childContext = & emptyContext; +#endif + enet_uint8 value; + enet_uint16 count, under, * parent = & predicted, total; + if (inData >= inEnd) + { + if (inBufferCount <= 0) + break; + inData = (const enet_uint8 *) inBuffers -> data; + inEnd = & inData [inBuffers -> dataLength]; + inBuffers ++; + inBufferCount --; + } + value = * inData ++; + + for (subcontext = & rangeCoder -> symbols [predicted]; + subcontext != root; +#ifdef ENET_CONTEXT_EXCLUSION + childContext = subcontext, +#endif + subcontext = & rangeCoder -> symbols [subcontext -> parent]) + { + ENET_CONTEXT_ENCODE (subcontext, symbol, value, under, count, ENET_SUBCONTEXT_SYMBOL_DELTA, 0); + * parent = symbol - rangeCoder -> symbols; + parent = & symbol -> parent; + total = subcontext -> total; +#ifdef ENET_CONTEXT_EXCLUSION + if (childContext -> total > ENET_SUBCONTEXT_SYMBOL_DELTA + ENET_SUBCONTEXT_ESCAPE_DELTA) + ENET_CONTEXT_ENCODE_EXCLUDE (childContext, value, under, total, 0); +#endif + if (count > 0) + { + ENET_RANGE_CODER_ENCODE (subcontext -> escapes + under, count, total); + } + else + { + if (subcontext -> escapes > 0 && subcontext -> escapes < total) + ENET_RANGE_CODER_ENCODE (0, subcontext -> escapes, total); + subcontext -> escapes += ENET_SUBCONTEXT_ESCAPE_DELTA; + subcontext -> total += ENET_SUBCONTEXT_ESCAPE_DELTA; + } + subcontext -> total += ENET_SUBCONTEXT_SYMBOL_DELTA; + if (count > 0xFF - 2*ENET_SUBCONTEXT_SYMBOL_DELTA || subcontext -> total > ENET_RANGE_CODER_BOTTOM - 0x100) + ENET_CONTEXT_RESCALE (subcontext, 0); + if (count > 0) goto nextInput; + } + + ENET_CONTEXT_ENCODE (root, symbol, value, under, count, ENET_CONTEXT_SYMBOL_DELTA, ENET_CONTEXT_SYMBOL_MINIMUM); + * parent = symbol - rangeCoder -> symbols; + parent = & symbol -> parent; + total = root -> total; +#ifdef ENET_CONTEXT_EXCLUSION + if (childContext -> total > ENET_SUBCONTEXT_SYMBOL_DELTA + ENET_SUBCONTEXT_ESCAPE_DELTA) + ENET_CONTEXT_ENCODE_EXCLUDE (childContext, value, under, total, ENET_CONTEXT_SYMBOL_MINIMUM); +#endif + ENET_RANGE_CODER_ENCODE (root -> escapes + under, count, total); + root -> total += ENET_CONTEXT_SYMBOL_DELTA; + if (count > 0xFF - 2*ENET_CONTEXT_SYMBOL_DELTA + ENET_CONTEXT_SYMBOL_MINIMUM || root -> total > ENET_RANGE_CODER_BOTTOM - 0x100) + ENET_CONTEXT_RESCALE (root, ENET_CONTEXT_SYMBOL_MINIMUM); + + nextInput: + if (order >= ENET_SUBCONTEXT_ORDER) + predicted = rangeCoder -> symbols [predicted].parent; + else + order ++; + ENET_RANGE_CODER_FREE_SYMBOLS; + } + + ENET_RANGE_CODER_FLUSH; + + return (size_t) (outData - outStart); +} + +#define ENET_RANGE_CODER_SEED \ +{ \ + if (inData < inEnd) decodeCode |= * inData ++ << 24; \ + if (inData < inEnd) decodeCode |= * inData ++ << 16; \ + if (inData < inEnd) decodeCode |= * inData ++ << 8; \ + if (inData < inEnd) decodeCode |= * inData ++; \ +} + +#define ENET_RANGE_CODER_READ(total) ((decodeCode - decodeLow) / (decodeRange /= (total))) + +#define ENET_RANGE_CODER_DECODE(under, count, total) \ +{ \ + decodeLow += 
(under) * decodeRange; \ + decodeRange *= (count); \ + for (;;) \ + { \ + if((decodeLow ^ (decodeLow + decodeRange)) >= ENET_RANGE_CODER_TOP) \ + { \ + if(decodeRange >= ENET_RANGE_CODER_BOTTOM) break; \ + decodeRange = -decodeLow & (ENET_RANGE_CODER_BOTTOM - 1); \ + } \ + decodeCode <<= 8; \ + if (inData < inEnd) \ + decodeCode |= * inData ++; \ + decodeRange <<= 8; \ + decodeLow <<= 8; \ + } \ +} + +#define ENET_CONTEXT_DECODE(context, symbol_, code, value_, under_, count_, update, minimum, createRoot, visitNode, createRight, createLeft) \ +{ \ + under_ = 0; \ + count_ = minimum; \ + if (! (context) -> symbols) \ + { \ + createRoot; \ + } \ + else \ + { \ + ENetSymbol * node = (context) + (context) -> symbols; \ + for (;;) \ + { \ + enet_uint16 after = under_ + node -> under + (node -> value + 1)*minimum, before = node -> count + minimum; \ + visitNode; \ + if (code >= after) \ + { \ + under_ += node -> under; \ + if (node -> right) { node += node -> right; continue; } \ + createRight; \ + } \ + else \ + if (code < after - before) \ + { \ + node -> under += update; \ + if (node -> left) { node += node -> left; continue; } \ + createLeft; \ + } \ + else \ + { \ + value_ = node -> value; \ + count_ += node -> count; \ + under_ = after - before; \ + node -> under += update; \ + node -> count += update; \ + symbol_ = node; \ + } \ + break; \ + } \ + } \ +} + +#define ENET_CONTEXT_TRY_DECODE(context, symbol_, code, value_, under_, count_, update, minimum, exclude) \ +ENET_CONTEXT_DECODE (context, symbol_, code, value_, under_, count_, update, minimum, return 0, exclude (node -> value, after, before), return 0, return 0) + +#define ENET_CONTEXT_ROOT_DECODE(context, symbol_, code, value_, under_, count_, update, minimum, exclude) \ +ENET_CONTEXT_DECODE (context, symbol_, code, value_, under_, count_, update, minimum, \ + { \ + value_ = code / minimum; \ + under_ = code - code%minimum; \ + ENET_SYMBOL_CREATE (symbol_, value_, update); \ + (context) -> symbols = symbol_ - (context); \ + }, \ + exclude (node -> value, after, before), \ + { \ + value_ = node->value + 1 + (code - after)/minimum; \ + under_ = code - (code - after)%minimum; \ + ENET_SYMBOL_CREATE (symbol_, value_, update); \ + node -> right = symbol_ - node; \ + }, \ + { \ + value_ = node->value - 1 - (after - before - code - 1)/minimum; \ + under_ = code - (after - before - code - 1)%minimum; \ + ENET_SYMBOL_CREATE (symbol_, value_, update); \ + node -> left = symbol_ - node; \ + }) \ + +#ifdef ENET_CONTEXT_EXCLUSION +typedef struct _ENetExclude +{ + enet_uint8 value; + enet_uint16 under; +} ENetExclude; + +#define ENET_CONTEXT_DECODE_EXCLUDE(context, total, minimum) \ +{ \ + enet_uint16 under = 0; \ + nextExclude = excludes; \ + ENET_CONTEXT_WALK (context, { \ + under += rangeCoder -> symbols [node -> parent].count + minimum; \ + nextExclude -> value = node -> value; \ + nextExclude -> under = under; \ + nextExclude ++; \ + }); \ + total -= under; \ +} + +#define ENET_CONTEXT_EXCLUDED(value_, after, before) \ +{ \ + size_t low = 0, high = nextExclude - excludes; \ + for(;;) \ + { \ + size_t mid = (low + high) >> 1; \ + const ENetExclude * exclude = & excludes [mid]; \ + if (value_ < exclude -> value) \ + { \ + if (low + 1 < high) \ + { \ + high = mid; \ + continue; \ + } \ + if (exclude > excludes) \ + after -= exclude [-1].under; \ + } \ + else \ + { \ + if (value_ > exclude -> value) \ + { \ + if (low + 1 < high) \ + { \ + low = mid; \ + continue; \ + } \ + } \ + else \ + before = 0; \ + after -= exclude -> under; \ + } \ + break; 
\ + } \ +} +#endif + +#define ENET_CONTEXT_NOT_EXCLUDED(value_, after, before) + +size_t +enet_range_coder_decompress (void * context, const enet_uint8 * inData, size_t inLimit, enet_uint8 * outData, size_t outLimit) +{ + ENetRangeCoder * rangeCoder = (ENetRangeCoder *) context; + enet_uint8 * outStart = outData, * outEnd = & outData [outLimit]; + const enet_uint8 * inEnd = & inData [inLimit]; + enet_uint32 decodeLow = 0, decodeCode = 0, decodeRange = ~0; + ENetSymbol * root; + enet_uint16 predicted = 0; + size_t order = 0, nextSymbol = 0; +#ifdef ENET_CONTEXT_EXCLUSION + ENetExclude excludes [256]; + ENetExclude * nextExclude = excludes; +#endif + + if (rangeCoder == NULL || inLimit <= 0) + return 0; + + ENET_CONTEXT_CREATE (root, ENET_CONTEXT_ESCAPE_MINIMUM, ENET_CONTEXT_SYMBOL_MINIMUM); + + ENET_RANGE_CODER_SEED; + + for (;;) + { + ENetSymbol * subcontext, * symbol, * patch; +#ifdef ENET_CONTEXT_EXCLUSION + const ENetSymbol * childContext = & emptyContext; +#endif + enet_uint8 value = 0; + enet_uint16 code, under, count, bottom, * parent = & predicted, total; + + for (subcontext = & rangeCoder -> symbols [predicted]; + subcontext != root; +#ifdef ENET_CONTEXT_EXCLUSION + childContext = subcontext, +#endif + subcontext = & rangeCoder -> symbols [subcontext -> parent]) + { + if (subcontext -> escapes <= 0) + continue; + total = subcontext -> total; +#ifdef ENET_CONTEXT_EXCLUSION + if (childContext -> total > 0) + ENET_CONTEXT_DECODE_EXCLUDE (childContext, total, 0); +#endif + if (subcontext -> escapes >= total) + continue; + code = ENET_RANGE_CODER_READ (total); + if (code < subcontext -> escapes) + { + ENET_RANGE_CODER_DECODE (0, subcontext -> escapes, total); + continue; + } + code -= subcontext -> escapes; +#ifdef ENET_CONTEXT_EXCLUSION + if (childContext -> total > 0) + { + ENET_CONTEXT_TRY_DECODE (subcontext, symbol, code, value, under, count, ENET_SUBCONTEXT_SYMBOL_DELTA, 0, ENET_CONTEXT_EXCLUDED); + } + else +#endif + { + ENET_CONTEXT_TRY_DECODE (subcontext, symbol, code, value, under, count, ENET_SUBCONTEXT_SYMBOL_DELTA, 0, ENET_CONTEXT_NOT_EXCLUDED); + } + bottom = symbol - rangeCoder -> symbols; + ENET_RANGE_CODER_DECODE (subcontext -> escapes + under, count, total); + subcontext -> total += ENET_SUBCONTEXT_SYMBOL_DELTA; + if (count > 0xFF - 2*ENET_SUBCONTEXT_SYMBOL_DELTA || subcontext -> total > ENET_RANGE_CODER_BOTTOM - 0x100) + ENET_CONTEXT_RESCALE (subcontext, 0); + goto patchContexts; + } + + total = root -> total; +#ifdef ENET_CONTEXT_EXCLUSION + if (childContext -> total > 0) + ENET_CONTEXT_DECODE_EXCLUDE (childContext, total, ENET_CONTEXT_SYMBOL_MINIMUM); +#endif + code = ENET_RANGE_CODER_READ (total); + if (code < root -> escapes) + { + ENET_RANGE_CODER_DECODE (0, root -> escapes, total); + break; + } + code -= root -> escapes; +#ifdef ENET_CONTEXT_EXCLUSION + if (childContext -> total > 0) + { + ENET_CONTEXT_ROOT_DECODE (root, symbol, code, value, under, count, ENET_CONTEXT_SYMBOL_DELTA, ENET_CONTEXT_SYMBOL_MINIMUM, ENET_CONTEXT_EXCLUDED); + } + else +#endif + { + ENET_CONTEXT_ROOT_DECODE (root, symbol, code, value, under, count, ENET_CONTEXT_SYMBOL_DELTA, ENET_CONTEXT_SYMBOL_MINIMUM, ENET_CONTEXT_NOT_EXCLUDED); + } + bottom = symbol - rangeCoder -> symbols; + ENET_RANGE_CODER_DECODE (root -> escapes + under, count, total); + root -> total += ENET_CONTEXT_SYMBOL_DELTA; + if (count > 0xFF - 2*ENET_CONTEXT_SYMBOL_DELTA + ENET_CONTEXT_SYMBOL_MINIMUM || root -> total > ENET_RANGE_CODER_BOTTOM - 0x100) + ENET_CONTEXT_RESCALE (root, ENET_CONTEXT_SYMBOL_MINIMUM); + + 
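    /* Patch step: the value was resolved either in one of the
       higher-order subcontexts above or in the order-0 root.  Every
       higher-order context the decoder passed through on the way down
       (from the predicted context to the one that finally produced the
       symbol) is now updated with the decoded byte via
       ENET_CONTEXT_ENCODE, mirroring the statistics the compressor built
       on its side so both models stay in sync. */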
patchContexts: + for (patch = & rangeCoder -> symbols [predicted]; + patch != subcontext; + patch = & rangeCoder -> symbols [patch -> parent]) + { + ENET_CONTEXT_ENCODE (patch, symbol, value, under, count, ENET_SUBCONTEXT_SYMBOL_DELTA, 0); + * parent = symbol - rangeCoder -> symbols; + parent = & symbol -> parent; + if (count <= 0) + { + patch -> escapes += ENET_SUBCONTEXT_ESCAPE_DELTA; + patch -> total += ENET_SUBCONTEXT_ESCAPE_DELTA; + } + patch -> total += ENET_SUBCONTEXT_SYMBOL_DELTA; + if (count > 0xFF - 2*ENET_SUBCONTEXT_SYMBOL_DELTA || patch -> total > ENET_RANGE_CODER_BOTTOM - 0x100) + ENET_CONTEXT_RESCALE (patch, 0); + } + * parent = bottom; + + ENET_RANGE_CODER_OUTPUT (value); + + if (order >= ENET_SUBCONTEXT_ORDER) + predicted = rangeCoder -> symbols [predicted].parent; + else + order ++; + ENET_RANGE_CODER_FREE_SYMBOLS; + } + + return (size_t) (outData - outStart); +} + +/** @defgroup host ENet host functions + @{ +*/ + +/** Sets the packet compressor the host should use to the default range coder. + @param host host to enable the range coder for + @returns 0 on success, < 0 on failure +*/ +int +enet_host_compress_with_range_coder (ENetHost * host) +{ + ENetCompressor compressor; + memset (& compressor, 0, sizeof (compressor)); + compressor.context = enet_range_coder_create(); + if (compressor.context == NULL) + return -1; + compressor.compress = enet_range_coder_compress; + compressor.decompress = enet_range_coder_decompress; + compressor.destroy = enet_range_coder_destroy; + enet_host_compress (host, & compressor); + return 0; +} + +/** @} */ + + diff --git a/pkg/enet/enet/config.guess b/pkg/enet/enet/config.guess new file mode 100755 index 000000000..e81d3ae7c --- /dev/null +++ b/pkg/enet/enet/config.guess @@ -0,0 +1,1748 @@ +#! /bin/sh +# Attempt to guess a canonical system name. +# Copyright 1992-2021 Free Software Foundation, Inc. + +# shellcheck disable=SC2006,SC2268 # see below for rationale + +timestamp='2021-06-03' + +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see . +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that +# program. This Exception is an additional permission under section 7 +# of the GNU General Public License, version 3 ("GPLv3"). +# +# Originally written by Per Bothner; maintained since 2000 by Ben Elliston. +# +# You can get the latest version of this script from: +# https://git.savannah.gnu.org/cgit/config.git/plain/config.guess +# +# Please send patches to . + + +# The "shellcheck disable" line above the timestamp inhibits complaints +# about features and limitations of the classic Bourne shell that were +# superseded or lifted in POSIX. 
However, this script identifies a wide +# variety of pre-POSIX systems that do not have POSIX shells at all, and +# even some reasonably current systems (Solaris 10 as case-in-point) still +# have a pre-POSIX /bin/sh. + + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] + +Output the configuration name of the system \`$me' is run on. + +Options: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.guess ($timestamp) + +Originally written by Per Bothner. +Copyright 1992-2021 Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" >&2 + exit 1 ;; + * ) + break ;; + esac +done + +if test $# != 0; then + echo "$me: too many arguments$help" >&2 + exit 1 +fi + +# Just in case it came from the environment. +GUESS= + +# CC_FOR_BUILD -- compiler used by this script. Note that the use of a +# compiler to aid in system detection is discouraged as it requires +# temporary files to be created and, as you can see below, it is a +# headache to deal with in a portable fashion. + +# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still +# use `HOST_CC' if defined, but it is deprecated. + +# Portable tmp directory creation inspired by the Autoconf team. + +tmp= +# shellcheck disable=SC2172 +trap 'test -z "$tmp" || rm -fr "$tmp"' 0 1 2 13 15 + +set_cc_for_build() { + # prevent multiple calls if $tmp is already set + test "$tmp" && return 0 + : "${TMPDIR=/tmp}" + # shellcheck disable=SC2039,SC3028 + { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || + { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir "$tmp" 2>/dev/null) ; } || + { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir "$tmp" 2>/dev/null) && echo "Warning: creating insecure temp directory" >&2 ; } || + { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } + dummy=$tmp/dummy + case ${CC_FOR_BUILD-},${HOST_CC-},${CC-} in + ,,) echo "int x;" > "$dummy.c" + for driver in cc gcc c89 c99 ; do + if ($driver -c -o "$dummy.o" "$dummy.c") >/dev/null 2>&1 ; then + CC_FOR_BUILD=$driver + break + fi + done + if test x"$CC_FOR_BUILD" = x ; then + CC_FOR_BUILD=no_compiler_found + fi + ;; + ,,*) CC_FOR_BUILD=$CC ;; + ,*,*) CC_FOR_BUILD=$HOST_CC ;; + esac +} + +# This is needed to find uname on a Pyramid OSx when run in the BSD universe. 
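# Overall flow: gather `uname -m`, `-r`, `-s` and `-v` (plus, on Linux, a
# C-library probe), then match the combined
# MACHINE:SYSTEM:RELEASE:VERSION string against the large case statement
# below to produce a canonical CPU-VENDOR-OS triple; when uname output
# alone is not conclusive, small test programs compiled with
# $CC_FOR_BUILD settle the remaining details.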
+# (ghazi@noc.rutgers.edu 1994-08-24) +if test -f /.attbin/uname ; then + PATH=$PATH:/.attbin ; export PATH +fi + +UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown +UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown +UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown +UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown + +case $UNAME_SYSTEM in +Linux|GNU|GNU/*) + LIBC=unknown + + set_cc_for_build + cat <<-EOF > "$dummy.c" + #include + #if defined(__UCLIBC__) + LIBC=uclibc + #elif defined(__dietlibc__) + LIBC=dietlibc + #elif defined(__GLIBC__) + LIBC=gnu + #else + #include + /* First heuristic to detect musl libc. */ + #ifdef __DEFINED_va_list + LIBC=musl + #endif + #endif + EOF + cc_set_libc=`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^LIBC' | sed 's, ,,g'` + eval "$cc_set_libc" + + # Second heuristic to detect musl libc. + if [ "$LIBC" = unknown ] && + command -v ldd >/dev/null && + ldd --version 2>&1 | grep -q ^musl; then + LIBC=musl + fi + + # If the system lacks a compiler, then just pick glibc. + # We could probably try harder. + if [ "$LIBC" = unknown ]; then + LIBC=gnu + fi + ;; +esac + +# Note: order is significant - the case branches are not exclusive. + +case $UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION in + *:NetBSD:*:*) + # NetBSD (nbsd) targets should (where applicable) match one or + # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, + # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently + # switched to ELF, *-*-netbsd* would select the old + # object file format. This provides both forward + # compatibility and a consistent mechanism for selecting the + # object file format. + # + # Note: NetBSD doesn't particularly care about the vendor + # portion of the name. We always set it to "unknown". + UNAME_MACHINE_ARCH=`(uname -p 2>/dev/null || \ + /sbin/sysctl -n hw.machine_arch 2>/dev/null || \ + /usr/sbin/sysctl -n hw.machine_arch 2>/dev/null || \ + echo unknown)` + case $UNAME_MACHINE_ARCH in + aarch64eb) machine=aarch64_be-unknown ;; + armeb) machine=armeb-unknown ;; + arm*) machine=arm-unknown ;; + sh3el) machine=shl-unknown ;; + sh3eb) machine=sh-unknown ;; + sh5el) machine=sh5le-unknown ;; + earmv*) + arch=`echo "$UNAME_MACHINE_ARCH" | sed -e 's,^e\(armv[0-9]\).*$,\1,'` + endian=`echo "$UNAME_MACHINE_ARCH" | sed -ne 's,^.*\(eb\)$,\1,p'` + machine=${arch}${endian}-unknown + ;; + *) machine=$UNAME_MACHINE_ARCH-unknown ;; + esac + # The Operating System including object format, if it has switched + # to ELF recently (or will in the future) and ABI. + case $UNAME_MACHINE_ARCH in + earm*) + os=netbsdelf + ;; + arm*|i386|m68k|ns32k|sh3*|sparc|vax) + set_cc_for_build + if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ELF__ + then + # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). + # Return netbsd for either. FIX? + os=netbsd + else + os=netbsdelf + fi + ;; + *) + os=netbsd + ;; + esac + # Determine ABI tags. + case $UNAME_MACHINE_ARCH in + earm*) + expr='s/^earmv[0-9]/-eabi/;s/eb$//' + abi=`echo "$UNAME_MACHINE_ARCH" | sed -e "$expr"` + ;; + esac + # The OS release + # Debian GNU/NetBSD machines have a different userland, and + # thus, need a distinct triplet. However, they do not need + # kernel version information, so it can be replaced with a + # suitable tag, in the style of linux-gnu. + case $UNAME_VERSION in + Debian*) + release='-gnu' + ;; + *) + release=`echo "$UNAME_RELEASE" | sed -e 's/[-_].*//' | cut -d. 
-f1,2` + ;; + esac + # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: + # contains redundant information, the shorter form: + # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. + GUESS=$machine-${os}${release}${abi-} + ;; + *:Bitrig:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'` + GUESS=$UNAME_MACHINE_ARCH-unknown-bitrig$UNAME_RELEASE + ;; + *:OpenBSD:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` + GUESS=$UNAME_MACHINE_ARCH-unknown-openbsd$UNAME_RELEASE + ;; + *:SecBSD:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/SecBSD.//'` + GUESS=$UNAME_MACHINE_ARCH-unknown-secbsd$UNAME_RELEASE + ;; + *:LibertyBSD:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/^.*BSD\.//'` + GUESS=$UNAME_MACHINE_ARCH-unknown-libertybsd$UNAME_RELEASE + ;; + *:MidnightBSD:*:*) + GUESS=$UNAME_MACHINE-unknown-midnightbsd$UNAME_RELEASE + ;; + *:ekkoBSD:*:*) + GUESS=$UNAME_MACHINE-unknown-ekkobsd$UNAME_RELEASE + ;; + *:SolidBSD:*:*) + GUESS=$UNAME_MACHINE-unknown-solidbsd$UNAME_RELEASE + ;; + *:OS108:*:*) + GUESS=$UNAME_MACHINE-unknown-os108_$UNAME_RELEASE + ;; + macppc:MirBSD:*:*) + GUESS=powerpc-unknown-mirbsd$UNAME_RELEASE + ;; + *:MirBSD:*:*) + GUESS=$UNAME_MACHINE-unknown-mirbsd$UNAME_RELEASE + ;; + *:Sortix:*:*) + GUESS=$UNAME_MACHINE-unknown-sortix + ;; + *:Twizzler:*:*) + GUESS=$UNAME_MACHINE-unknown-twizzler + ;; + *:Redox:*:*) + GUESS=$UNAME_MACHINE-unknown-redox + ;; + mips:OSF1:*.*) + GUESS=mips-dec-osf1 + ;; + alpha:OSF1:*:*) + # Reset EXIT trap before exiting to avoid spurious non-zero exit code. + trap '' 0 + case $UNAME_RELEASE in + *4.0) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` + ;; + *5.*) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` + ;; + esac + # According to Compaq, /usr/sbin/psrinfo has been available on + # OSF/1 and Tru64 systems produced since 1995. I hope that + # covers most systems running today. This code pipes the CPU + # types through head -n 1, so we only detect the type of CPU 0. + ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` + case $ALPHA_CPU_TYPE in + "EV4 (21064)") + UNAME_MACHINE=alpha ;; + "EV4.5 (21064)") + UNAME_MACHINE=alpha ;; + "LCA4 (21066/21068)") + UNAME_MACHINE=alpha ;; + "EV5 (21164)") + UNAME_MACHINE=alphaev5 ;; + "EV5.6 (21164A)") + UNAME_MACHINE=alphaev56 ;; + "EV5.6 (21164PC)") + UNAME_MACHINE=alphapca56 ;; + "EV5.7 (21164PC)") + UNAME_MACHINE=alphapca57 ;; + "EV6 (21264)") + UNAME_MACHINE=alphaev6 ;; + "EV6.7 (21264A)") + UNAME_MACHINE=alphaev67 ;; + "EV6.8CB (21264C)") + UNAME_MACHINE=alphaev68 ;; + "EV6.8AL (21264B)") + UNAME_MACHINE=alphaev68 ;; + "EV6.8CX (21264D)") + UNAME_MACHINE=alphaev68 ;; + "EV6.9A (21264/EV69A)") + UNAME_MACHINE=alphaev69 ;; + "EV7 (21364)") + UNAME_MACHINE=alphaev7 ;; + "EV7.9 (21364A)") + UNAME_MACHINE=alphaev79 ;; + esac + # A Pn.n version is a patched version. + # A Vn.n version is a released version. + # A Tn.n version is a released field test version. + # A Xn.n version is an unreleased experimental baselevel. + # 1.2 uses "1.2" for uname -r. 
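	# Strip the leading status letter (P/V/T/X) and lowercase the rest,
	# so e.g. "V4.0F" becomes a plain osf4.0f release in the triple below.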
+ OSF_REL=`echo "$UNAME_RELEASE" | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` + GUESS=$UNAME_MACHINE-dec-osf$OSF_REL + ;; + Amiga*:UNIX_System_V:4.0:*) + GUESS=m68k-unknown-sysv4 + ;; + *:[Aa]miga[Oo][Ss]:*:*) + GUESS=$UNAME_MACHINE-unknown-amigaos + ;; + *:[Mm]orph[Oo][Ss]:*:*) + GUESS=$UNAME_MACHINE-unknown-morphos + ;; + *:OS/390:*:*) + GUESS=i370-ibm-openedition + ;; + *:z/VM:*:*) + GUESS=s390-ibm-zvmoe + ;; + *:OS400:*:*) + GUESS=powerpc-ibm-os400 + ;; + arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) + GUESS=arm-acorn-riscix$UNAME_RELEASE + ;; + arm*:riscos:*:*|arm*:RISCOS:*:*) + GUESS=arm-unknown-riscos + ;; + SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) + GUESS=hppa1.1-hitachi-hiuxmpp + ;; + Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) + # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. + case `(/bin/universe) 2>/dev/null` in + att) GUESS=pyramid-pyramid-sysv3 ;; + *) GUESS=pyramid-pyramid-bsd ;; + esac + ;; + NILE*:*:*:dcosx) + GUESS=pyramid-pyramid-svr4 + ;; + DRS?6000:unix:4.0:6*) + GUESS=sparc-icl-nx6 + ;; + DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) + case `/usr/bin/uname -p` in + sparc) GUESS=sparc-icl-nx7 ;; + esac + ;; + s390x:SunOS:*:*) + SUN_REL=`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'` + GUESS=$UNAME_MACHINE-ibm-solaris2$SUN_REL + ;; + sun4H:SunOS:5.*:*) + SUN_REL=`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'` + GUESS=sparc-hal-solaris2$SUN_REL + ;; + sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) + SUN_REL=`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'` + GUESS=sparc-sun-solaris2$SUN_REL + ;; + i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) + GUESS=i386-pc-auroraux$UNAME_RELEASE + ;; + i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) + set_cc_for_build + SUN_ARCH=i386 + # If there is a compiler, see if it is configured for 64-bit objects. + # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. + # This test works for both compilers. + if test "$CC_FOR_BUILD" != no_compiler_found; then + if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + SUN_ARCH=x86_64 + fi + fi + SUN_REL=`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'` + GUESS=$SUN_ARCH-pc-solaris2$SUN_REL + ;; + sun4*:SunOS:6*:*) + # According to config.sub, this is the proper way to canonicalize + # SunOS6. Hard to guess exactly what SunOS6 will be like, but + # it's likely to be more like Solaris than SunOS4. + SUN_REL=`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'` + GUESS=sparc-sun-solaris3$SUN_REL + ;; + sun4*:SunOS:*:*) + case `/usr/bin/arch -k` in + Series*|S4*) + UNAME_RELEASE=`uname -v` + ;; + esac + # Japanese Language versions have a version number like `4.1.3-JL'. + SUN_REL=`echo "$UNAME_RELEASE" | sed -e 's/-/_/'` + GUESS=sparc-sun-sunos$SUN_REL + ;; + sun3*:SunOS:*:*) + GUESS=m68k-sun-sunos$UNAME_RELEASE + ;; + sun*:*:4.2BSD:*) + UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` + test "x$UNAME_RELEASE" = x && UNAME_RELEASE=3 + case `/bin/arch` in + sun3) + GUESS=m68k-sun-sunos$UNAME_RELEASE + ;; + sun4) + GUESS=sparc-sun-sunos$UNAME_RELEASE + ;; + esac + ;; + aushp:SunOS:*:*) + GUESS=sparc-auspex-sunos$UNAME_RELEASE + ;; + # The situation for MiNT is a little confusing. The machine name + # can be virtually everything (everything which is not + # "atarist" or "atariste" at least should have a processor + # > m68000). 
The system name ranges from "MiNT" over "FreeMiNT" + # to the lowercase version "mint" (or "freemint"). Finally + # the system name "TOS" denotes a system which is actually not + # MiNT. But MiNT is downward compatible to TOS, so this should + # be no problem. + atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) + GUESS=m68k-atari-mint$UNAME_RELEASE + ;; + atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) + GUESS=m68k-atari-mint$UNAME_RELEASE + ;; + *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) + GUESS=m68k-atari-mint$UNAME_RELEASE + ;; + milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) + GUESS=m68k-milan-mint$UNAME_RELEASE + ;; + hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) + GUESS=m68k-hades-mint$UNAME_RELEASE + ;; + *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) + GUESS=m68k-unknown-mint$UNAME_RELEASE + ;; + m68k:machten:*:*) + GUESS=m68k-apple-machten$UNAME_RELEASE + ;; + powerpc:machten:*:*) + GUESS=powerpc-apple-machten$UNAME_RELEASE + ;; + RISC*:Mach:*:*) + GUESS=mips-dec-mach_bsd4.3 + ;; + RISC*:ULTRIX:*:*) + GUESS=mips-dec-ultrix$UNAME_RELEASE + ;; + VAX*:ULTRIX*:*:*) + GUESS=vax-dec-ultrix$UNAME_RELEASE + ;; + 2020:CLIX:*:* | 2430:CLIX:*:*) + GUESS=clipper-intergraph-clix$UNAME_RELEASE + ;; + mips:*:*:UMIPS | mips:*:*:RISCos) + set_cc_for_build + sed 's/^ //' << EOF > "$dummy.c" +#ifdef __cplusplus +#include /* for printf() prototype */ + int main (int argc, char *argv[]) { +#else + int main (argc, argv) int argc; char *argv[]; { +#endif + #if defined (host_mips) && defined (MIPSEB) + #if defined (SYSTYPE_SYSV) + printf ("mips-mips-riscos%ssysv\\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_SVR4) + printf ("mips-mips-riscos%ssvr4\\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) + printf ("mips-mips-riscos%sbsd\\n", argv[1]); exit (0); + #endif + #endif + exit (-1); + } +EOF + $CC_FOR_BUILD -o "$dummy" "$dummy.c" && + dummyarg=`echo "$UNAME_RELEASE" | sed -n 's/\([0-9]*\).*/\1/p'` && + SYSTEM_NAME=`"$dummy" "$dummyarg"` && + { echo "$SYSTEM_NAME"; exit; } + GUESS=mips-mips-riscos$UNAME_RELEASE + ;; + Motorola:PowerMAX_OS:*:*) + GUESS=powerpc-motorola-powermax + ;; + Motorola:*:4.3:PL8-*) + GUESS=powerpc-harris-powermax + ;; + Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) + GUESS=powerpc-harris-powermax + ;; + Night_Hawk:Power_UNIX:*:*) + GUESS=powerpc-harris-powerunix + ;; + m88k:CX/UX:7*:*) + GUESS=m88k-harris-cxux7 + ;; + m88k:*:4*:R4*) + GUESS=m88k-motorola-sysv4 + ;; + m88k:*:3*:R3*) + GUESS=m88k-motorola-sysv3 + ;; + AViiON:dgux:*:*) + # DG/UX returns AViiON for all architectures + UNAME_PROCESSOR=`/usr/bin/uname -p` + if test "$UNAME_PROCESSOR" = mc88100 || test "$UNAME_PROCESSOR" = mc88110 + then + if test "$TARGET_BINARY_INTERFACE"x = m88kdguxelfx || \ + test "$TARGET_BINARY_INTERFACE"x = x + then + GUESS=m88k-dg-dgux$UNAME_RELEASE + else + GUESS=m88k-dg-dguxbcs$UNAME_RELEASE + fi + else + GUESS=i586-dg-dgux$UNAME_RELEASE + fi + ;; + M88*:DolphinOS:*:*) # DolphinOS (SVR3) + GUESS=m88k-dolphin-sysv3 + ;; + M88*:*:R3*:*) + # Delta 88k system running SVR3 + GUESS=m88k-motorola-sysv3 + ;; + XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) + GUESS=m88k-tektronix-sysv3 + ;; + Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) + GUESS=m68k-tektronix-bsd + ;; + *:IRIX*:*:*) + IRIX_REL=`echo "$UNAME_RELEASE" | sed -e 's/-/_/g'` + GUESS=mips-sgi-irix$IRIX_REL + ;; + ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. 
+ GUESS=romp-ibm-aix # uname -m gives an 8 hex-code CPU id + ;; # Note that: echo "'`uname -s`'" gives 'AIX ' + i*86:AIX:*:*) + GUESS=i386-ibm-aix + ;; + ia64:AIX:*:*) + if test -x /usr/bin/oslevel ; then + IBM_REV=`/usr/bin/oslevel` + else + IBM_REV=$UNAME_VERSION.$UNAME_RELEASE + fi + GUESS=$UNAME_MACHINE-ibm-aix$IBM_REV + ;; + *:AIX:2:3) + if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then + set_cc_for_build + sed 's/^ //' << EOF > "$dummy.c" + #include + + main() + { + if (!__power_pc()) + exit(1); + puts("powerpc-ibm-aix3.2.5"); + exit(0); + } +EOF + if $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=`"$dummy"` + then + GUESS=$SYSTEM_NAME + else + GUESS=rs6000-ibm-aix3.2.5 + fi + elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then + GUESS=rs6000-ibm-aix3.2.4 + else + GUESS=rs6000-ibm-aix3.2 + fi + ;; + *:AIX:*:[4567]) + IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` + if /usr/sbin/lsattr -El "$IBM_CPU_ID" | grep ' POWER' >/dev/null 2>&1; then + IBM_ARCH=rs6000 + else + IBM_ARCH=powerpc + fi + if test -x /usr/bin/lslpp ; then + IBM_REV=`/usr/bin/lslpp -Lqc bos.rte.libc | \ + awk -F: '{ print $3 }' | sed s/[0-9]*$/0/` + else + IBM_REV=$UNAME_VERSION.$UNAME_RELEASE + fi + GUESS=$IBM_ARCH-ibm-aix$IBM_REV + ;; + *:AIX:*:*) + GUESS=rs6000-ibm-aix + ;; + ibmrt:4.4BSD:*|romp-ibm:4.4BSD:*) + GUESS=romp-ibm-bsd4.4 + ;; + ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and + GUESS=romp-ibm-bsd$UNAME_RELEASE # 4.3 with uname added to + ;; # report: romp-ibm BSD 4.3 + *:BOSX:*:*) + GUESS=rs6000-bull-bosx + ;; + DPX/2?00:B.O.S.:*:*) + GUESS=m68k-bull-sysv3 + ;; + 9000/[34]??:4.3bsd:1.*:*) + GUESS=m68k-hp-bsd + ;; + hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) + GUESS=m68k-hp-bsd4.4 + ;; + 9000/[34678]??:HP-UX:*:*) + HPUX_REV=`echo "$UNAME_RELEASE" | sed -e 's/[^.]*.[0B]*//'` + case $UNAME_MACHINE in + 9000/31?) HP_ARCH=m68000 ;; + 9000/[34]??) HP_ARCH=m68k ;; + 9000/[678][0-9][0-9]) + if test -x /usr/bin/getconf; then + sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` + sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` + case $sc_cpu_version in + 523) HP_ARCH=hppa1.0 ;; # CPU_PA_RISC1_0 + 528) HP_ARCH=hppa1.1 ;; # CPU_PA_RISC1_1 + 532) # CPU_PA_RISC2_0 + case $sc_kernel_bits in + 32) HP_ARCH=hppa2.0n ;; + 64) HP_ARCH=hppa2.0w ;; + '') HP_ARCH=hppa2.0 ;; # HP-UX 10.20 + esac ;; + esac + fi + if test "$HP_ARCH" = ""; then + set_cc_for_build + sed 's/^ //' << EOF > "$dummy.c" + + #define _HPUX_SOURCE + #include + #include + + int main () + { + #if defined(_SC_KERNEL_BITS) + long bits = sysconf(_SC_KERNEL_BITS); + #endif + long cpu = sysconf (_SC_CPU_VERSION); + + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1"); break; + case CPU_PA_RISC2_0: + #if defined(_SC_KERNEL_BITS) + switch (bits) + { + case 64: puts ("hppa2.0w"); break; + case 32: puts ("hppa2.0n"); break; + default: puts ("hppa2.0"); break; + } break; + #else /* !defined(_SC_KERNEL_BITS) */ + puts ("hppa2.0"); break; + #endif + default: puts ("hppa1.0"); break; + } + exit (0); + } +EOF + (CCOPTS="" $CC_FOR_BUILD -o "$dummy" "$dummy.c" 2>/dev/null) && HP_ARCH=`"$dummy"` + test -z "$HP_ARCH" && HP_ARCH=hppa + fi ;; + esac + if test "$HP_ARCH" = hppa2.0w + then + set_cc_for_build + + # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating + # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler + # generating 64-bit code. 
GNU and HP use different nomenclature: + # + # $ CC_FOR_BUILD=cc ./config.guess + # => hppa2.0w-hp-hpux11.23 + # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess + # => hppa64-hp-hpux11.23 + + if echo __LP64__ | (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | + grep -q __LP64__ + then + HP_ARCH=hppa2.0w + else + HP_ARCH=hppa64 + fi + fi + GUESS=$HP_ARCH-hp-hpux$HPUX_REV + ;; + ia64:HP-UX:*:*) + HPUX_REV=`echo "$UNAME_RELEASE" | sed -e 's/[^.]*.[0B]*//'` + GUESS=ia64-hp-hpux$HPUX_REV + ;; + 3050*:HI-UX:*:*) + set_cc_for_build + sed 's/^ //' << EOF > "$dummy.c" + #include + int + main () + { + long cpu = sysconf (_SC_CPU_VERSION); + /* The order matters, because CPU_IS_HP_MC68K erroneously returns + true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct + results, however. */ + if (CPU_IS_PA_RISC (cpu)) + { + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; + case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; + default: puts ("hppa-hitachi-hiuxwe2"); break; + } + } + else if (CPU_IS_HP_MC68K (cpu)) + puts ("m68k-hitachi-hiuxwe2"); + else puts ("unknown-hitachi-hiuxwe2"); + exit (0); + } +EOF + $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=`"$dummy"` && + { echo "$SYSTEM_NAME"; exit; } + GUESS=unknown-hitachi-hiuxwe2 + ;; + 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:*) + GUESS=hppa1.1-hp-bsd + ;; + 9000/8??:4.3bsd:*:*) + GUESS=hppa1.0-hp-bsd + ;; + *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) + GUESS=hppa1.0-hp-mpeix + ;; + hp7??:OSF1:*:* | hp8?[79]:OSF1:*:*) + GUESS=hppa1.1-hp-osf + ;; + hp8??:OSF1:*:*) + GUESS=hppa1.0-hp-osf + ;; + i*86:OSF1:*:*) + if test -x /usr/sbin/sysversion ; then + GUESS=$UNAME_MACHINE-unknown-osf1mk + else + GUESS=$UNAME_MACHINE-unknown-osf1 + fi + ;; + parisc*:Lites*:*:*) + GUESS=hppa1.1-hp-lites + ;; + C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) + GUESS=c1-convex-bsd + ;; + C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) + if getsysinfo -f scalar_acc + then echo c32-convex-bsd + else echo c2-convex-bsd + fi + exit ;; + C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) + GUESS=c34-convex-bsd + ;; + C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) + GUESS=c38-convex-bsd + ;; + C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) + GUESS=c4-convex-bsd + ;; + CRAY*Y-MP:*:*:*) + CRAY_REL=`echo "$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'` + GUESS=ymp-cray-unicos$CRAY_REL + ;; + CRAY*[A-Z]90:*:*:*) + echo "$UNAME_MACHINE"-cray-unicos"$UNAME_RELEASE" \ + | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ + -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ + -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*TS:*:*:*) + CRAY_REL=`echo "$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'` + GUESS=t90-cray-unicos$CRAY_REL + ;; + CRAY*T3E:*:*:*) + CRAY_REL=`echo "$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'` + GUESS=alphaev5-cray-unicosmk$CRAY_REL + ;; + CRAY*SV1:*:*:*) + CRAY_REL=`echo "$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'` + GUESS=sv1-cray-unicos$CRAY_REL + ;; + *:UNICOS/mp:*:*) + CRAY_REL=`echo "$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'` + GUESS=craynv-cray-unicosmp$CRAY_REL + ;; + F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) + FUJITSU_PROC=`uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` + FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` + FUJITSU_REL=`echo "$UNAME_RELEASE" | sed -e 's/ /_/'` + GUESS=${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL} + ;; + 5000:UNIX_System_V:4.*:*) + FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ 
abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` + FUJITSU_REL=`echo "$UNAME_RELEASE" | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/'` + GUESS=sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL} + ;; + i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) + GUESS=$UNAME_MACHINE-pc-bsdi$UNAME_RELEASE + ;; + sparc*:BSD/OS:*:*) + GUESS=sparc-unknown-bsdi$UNAME_RELEASE + ;; + *:BSD/OS:*:*) + GUESS=$UNAME_MACHINE-unknown-bsdi$UNAME_RELEASE + ;; + arm:FreeBSD:*:*) + UNAME_PROCESSOR=`uname -p` + set_cc_for_build + if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_PCS_VFP + then + FREEBSD_REL=`echo "$UNAME_RELEASE" | sed -e 's/[-(].*//'` + GUESS=$UNAME_PROCESSOR-unknown-freebsd$FREEBSD_REL-gnueabi + else + FREEBSD_REL=`echo "$UNAME_RELEASE" | sed -e 's/[-(].*//'` + GUESS=$UNAME_PROCESSOR-unknown-freebsd$FREEBSD_REL-gnueabihf + fi + ;; + *:FreeBSD:*:*) + UNAME_PROCESSOR=`/usr/bin/uname -p` + case $UNAME_PROCESSOR in + amd64) + UNAME_PROCESSOR=x86_64 ;; + i386) + UNAME_PROCESSOR=i586 ;; + esac + FREEBSD_REL=`echo "$UNAME_RELEASE" | sed -e 's/[-(].*//'` + GUESS=$UNAME_PROCESSOR-unknown-freebsd$FREEBSD_REL + ;; + i*:CYGWIN*:*) + GUESS=$UNAME_MACHINE-pc-cygwin + ;; + *:MINGW64*:*) + GUESS=$UNAME_MACHINE-pc-mingw64 + ;; + *:MINGW*:*) + GUESS=$UNAME_MACHINE-pc-mingw32 + ;; + *:MSYS*:*) + GUESS=$UNAME_MACHINE-pc-msys + ;; + i*:PW*:*) + GUESS=$UNAME_MACHINE-pc-pw32 + ;; + *:Interix*:*) + case $UNAME_MACHINE in + x86) + GUESS=i586-pc-interix$UNAME_RELEASE + ;; + authenticamd | genuineintel | EM64T) + GUESS=x86_64-unknown-interix$UNAME_RELEASE + ;; + IA64) + GUESS=ia64-unknown-interix$UNAME_RELEASE + ;; + esac ;; + i*:UWIN*:*) + GUESS=$UNAME_MACHINE-pc-uwin + ;; + amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) + GUESS=x86_64-pc-cygwin + ;; + prep*:SunOS:5.*:*) + SUN_REL=`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'` + GUESS=powerpcle-unknown-solaris2$SUN_REL + ;; + *:GNU:*:*) + # the GNU system + GNU_ARCH=`echo "$UNAME_MACHINE" | sed -e 's,[-/].*$,,'` + GNU_REL=`echo "$UNAME_RELEASE" | sed -e 's,/.*$,,'` + GUESS=$GNU_ARCH-unknown-$LIBC$GNU_REL + ;; + *:GNU/*:*:*) + # other systems with GNU libc and userland + GNU_SYS=`echo "$UNAME_SYSTEM" | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]"` + GNU_REL=`echo "$UNAME_RELEASE" | sed -e 's/[-(].*//'` + GUESS=$UNAME_MACHINE-unknown-$GNU_SYS$GNU_REL-$LIBC + ;; + *:Minix:*:*) + GUESS=$UNAME_MACHINE-unknown-minix + ;; + aarch64:Linux:*:*) + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; + aarch64_be:Linux:*:*) + UNAME_MACHINE=aarch64_be + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; + alpha:Linux:*:*) + case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' /proc/cpuinfo 2>/dev/null` in + EV5) UNAME_MACHINE=alphaev5 ;; + EV56) UNAME_MACHINE=alphaev56 ;; + PCA56) UNAME_MACHINE=alphapca56 ;; + PCA57) UNAME_MACHINE=alphapca56 ;; + EV6) UNAME_MACHINE=alphaev6 ;; + EV67) UNAME_MACHINE=alphaev67 ;; + EV68*) UNAME_MACHINE=alphaev68 ;; + esac + objdump --private-headers /bin/sh | grep -q ld.so.1 + if test "$?" 
= 0 ; then LIBC=gnulibc1 ; fi + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; + arc:Linux:*:* | arceb:Linux:*:* | arc32:Linux:*:* | arc64:Linux:*:*) + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; + arm*:Linux:*:*) + set_cc_for_build + if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_EABI__ + then + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + else + if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_PCS_VFP + then + GUESS=$UNAME_MACHINE-unknown-linux-${LIBC}eabi + else + GUESS=$UNAME_MACHINE-unknown-linux-${LIBC}eabihf + fi + fi + ;; + avr32*:Linux:*:*) + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; + cris:Linux:*:*) + GUESS=$UNAME_MACHINE-axis-linux-$LIBC + ;; + crisv32:Linux:*:*) + GUESS=$UNAME_MACHINE-axis-linux-$LIBC + ;; + e2k:Linux:*:*) + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; + frv:Linux:*:*) + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; + hexagon:Linux:*:*) + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; + i*86:Linux:*:*) + GUESS=$UNAME_MACHINE-pc-linux-$LIBC + ;; + ia64:Linux:*:*) + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; + k1om:Linux:*:*) + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; + loongarch32:Linux:*:* | loongarch64:Linux:*:* | loongarchx32:Linux:*:*) + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; + m32r*:Linux:*:*) + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; + m68*:Linux:*:*) + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; + mips:Linux:*:* | mips64:Linux:*:*) + set_cc_for_build + IS_GLIBC=0 + test x"${LIBC}" = xgnu && IS_GLIBC=1 + sed 's/^ //' << EOF > "$dummy.c" + #undef CPU + #undef mips + #undef mipsel + #undef mips64 + #undef mips64el + #if ${IS_GLIBC} && defined(_ABI64) + LIBCABI=gnuabi64 + #else + #if ${IS_GLIBC} && defined(_ABIN32) + LIBCABI=gnuabin32 + #else + LIBCABI=${LIBC} + #endif + #endif + + #if ${IS_GLIBC} && defined(__mips64) && defined(__mips_isa_rev) && __mips_isa_rev>=6 + CPU=mipsisa64r6 + #else + #if ${IS_GLIBC} && !defined(__mips64) && defined(__mips_isa_rev) && __mips_isa_rev>=6 + CPU=mipsisa32r6 + #else + #if defined(__mips64) + CPU=mips64 + #else + CPU=mips + #endif + #endif + #endif + + #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) + MIPS_ENDIAN=el + #else + #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) + MIPS_ENDIAN= + #else + MIPS_ENDIAN= + #endif + #endif +EOF + cc_set_vars=`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^CPU\|^MIPS_ENDIAN\|^LIBCABI'` + eval "$cc_set_vars" + test "x$CPU" != x && { echo "$CPU${MIPS_ENDIAN}-unknown-linux-$LIBCABI"; exit; } + ;; + mips64el:Linux:*:*) + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; + openrisc*:Linux:*:*) + GUESS=or1k-unknown-linux-$LIBC + ;; + or32:Linux:*:* | or1k*:Linux:*:*) + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; + padre:Linux:*:*) + GUESS=sparc-unknown-linux-$LIBC + ;; + parisc64:Linux:*:* | hppa64:Linux:*:*) + GUESS=hppa64-unknown-linux-$LIBC + ;; + parisc:Linux:*:* | hppa:Linux:*:*) + # Look for CPU level + case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in + PA7*) GUESS=hppa1.1-unknown-linux-$LIBC ;; + PA8*) GUESS=hppa2.0-unknown-linux-$LIBC ;; + *) GUESS=hppa-unknown-linux-$LIBC ;; + esac + ;; + ppc64:Linux:*:*) + GUESS=powerpc64-unknown-linux-$LIBC + ;; + ppc:Linux:*:*) + GUESS=powerpc-unknown-linux-$LIBC + ;; + ppc64le:Linux:*:*) + GUESS=powerpc64le-unknown-linux-$LIBC + ;; + ppcle:Linux:*:*) + GUESS=powerpcle-unknown-linux-$LIBC + ;; + riscv32:Linux:*:* | riscv32be:Linux:*:* | riscv64:Linux:*:* | 
riscv64be:Linux:*:*) + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; + s390:Linux:*:* | s390x:Linux:*:*) + GUESS=$UNAME_MACHINE-ibm-linux-$LIBC + ;; + sh64*:Linux:*:*) + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; + sh*:Linux:*:*) + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; + sparc:Linux:*:* | sparc64:Linux:*:*) + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; + tile*:Linux:*:*) + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; + vax:Linux:*:*) + GUESS=$UNAME_MACHINE-dec-linux-$LIBC + ;; + x86_64:Linux:*:*) + set_cc_for_build + LIBCABI=$LIBC + if test "$CC_FOR_BUILD" != no_compiler_found; then + if (echo '#ifdef __ILP32__'; echo IS_X32; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_X32 >/dev/null + then + LIBCABI=${LIBC}x32 + fi + fi + GUESS=$UNAME_MACHINE-pc-linux-$LIBCABI + ;; + xtensa*:Linux:*:*) + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; + i*86:DYNIX/ptx:4*:*) + # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. + # earlier versions are messed up and put the nodename in both + # sysname and nodename. + GUESS=i386-sequent-sysv4 + ;; + i*86:UNIX_SV:4.2MP:2.*) + # Unixware is an offshoot of SVR4, but it has its own version + # number series starting with 2... + # I am not positive that other SVR4 systems won't match this, + # I just have to hope. -- rms. + # Use sysv4.2uw... so that sysv4* matches it. + GUESS=$UNAME_MACHINE-pc-sysv4.2uw$UNAME_VERSION + ;; + i*86:OS/2:*:*) + # If we were able to find `uname', then EMX Unix compatibility + # is probably installed. + GUESS=$UNAME_MACHINE-pc-os2-emx + ;; + i*86:XTS-300:*:STOP) + GUESS=$UNAME_MACHINE-unknown-stop + ;; + i*86:atheos:*:*) + GUESS=$UNAME_MACHINE-unknown-atheos + ;; + i*86:syllable:*:*) + GUESS=$UNAME_MACHINE-pc-syllable + ;; + i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) + GUESS=i386-unknown-lynxos$UNAME_RELEASE + ;; + i*86:*DOS:*:*) + GUESS=$UNAME_MACHINE-pc-msdosdjgpp + ;; + i*86:*:4.*:*) + UNAME_REL=`echo "$UNAME_RELEASE" | sed 's/\/MP$//'` + if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then + GUESS=$UNAME_MACHINE-univel-sysv$UNAME_REL + else + GUESS=$UNAME_MACHINE-pc-sysv$UNAME_REL + fi + ;; + i*86:*:5:[678]*) + # UnixWare 7.x, OpenUNIX and OpenServer 6. + case `/bin/uname -X | grep "^Machine"` in + *486*) UNAME_MACHINE=i486 ;; + *Pentium) UNAME_MACHINE=i586 ;; + *Pent*|*Celeron) UNAME_MACHINE=i686 ;; + esac + GUESS=$UNAME_MACHINE-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} + ;; + i*86:*:3.2:*) + if test -f /usr/options/cb.name; then + UNAME_REL=`sed -n 's/.*Version //p' /dev/null >/dev/null ; then + UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` + (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 + (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ + && UNAME_MACHINE=i586 + (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ + && UNAME_MACHINE=i686 + (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ + && UNAME_MACHINE=i686 + GUESS=$UNAME_MACHINE-pc-sco$UNAME_REL + else + GUESS=$UNAME_MACHINE-pc-sysv32 + fi + ;; + pc:*:*:*) + # Left here for compatibility: + # uname -m prints for DJGPP always 'pc', but it prints nothing about + # the processor, so we play safe by assuming i586. + # Note: whatever this is, it MUST be the same as what config.sub + # prints for the "djgpp" host, or else GDB configure will decide that + # this is a cross-build. 
+ GUESS=i586-pc-msdosdjgpp + ;; + Intel:Mach:3*:*) + GUESS=i386-pc-mach3 + ;; + paragon:*:*:*) + GUESS=i860-intel-osf1 + ;; + i860:*:4.*:*) # i860-SVR4 + if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then + GUESS=i860-stardent-sysv$UNAME_RELEASE # Stardent Vistra i860-SVR4 + else # Add other i860-SVR4 vendors below as they are discovered. + GUESS=i860-unknown-sysv$UNAME_RELEASE # Unknown i860-SVR4 + fi + ;; + mini*:CTIX:SYS*5:*) + # "miniframe" + GUESS=m68010-convergent-sysv + ;; + mc68k:UNIX:SYSTEM5:3.51m) + GUESS=m68k-convergent-sysv + ;; + M680?0:D-NIX:5.3:*) + GUESS=m68k-diab-dnix + ;; + M68*:*:R3V[5678]*:*) + test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; + 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) + OS_REL='' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3"$OS_REL"; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;; + 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4; exit; } ;; + NCR*:*:4.2:* | MPRAS*:*:4.2:*) + OS_REL='.3' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3"$OS_REL"; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } + /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ + && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;; + m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) + GUESS=m68k-unknown-lynxos$UNAME_RELEASE + ;; + mc68030:UNIX_System_V:4.*:*) + GUESS=m68k-atari-sysv4 + ;; + TSUNAMI:LynxOS:2.*:*) + GUESS=sparc-unknown-lynxos$UNAME_RELEASE + ;; + rs6000:LynxOS:2.*:*) + GUESS=rs6000-unknown-lynxos$UNAME_RELEASE + ;; + PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) + GUESS=powerpc-unknown-lynxos$UNAME_RELEASE + ;; + SM[BE]S:UNIX_SV:*:*) + GUESS=mips-dde-sysv$UNAME_RELEASE + ;; + RM*:ReliantUNIX-*:*:*) + GUESS=mips-sni-sysv4 + ;; + RM*:SINIX-*:*:*) + GUESS=mips-sni-sysv4 + ;; + *:SINIX-*:*:*) + if uname -p 2>/dev/null >/dev/null ; then + UNAME_MACHINE=`(uname -p) 2>/dev/null` + GUESS=$UNAME_MACHINE-sni-sysv4 + else + GUESS=ns32k-sni-sysv + fi + ;; + PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort + # says + GUESS=i586-unisys-sysv4 + ;; + *:UNIX_System_V:4*:FTX*) + # From Gerald Hewes . + # How about differentiating between stratus architectures? -djm + GUESS=hppa1.1-stratus-sysv4 + ;; + *:*:*:FTX*) + # From seanf@swdc.stratus.com. + GUESS=i860-stratus-sysv4 + ;; + i*86:VOS:*:*) + # From Paul.Green@stratus.com. + GUESS=$UNAME_MACHINE-stratus-vos + ;; + *:VOS:*:*) + # From Paul.Green@stratus.com. + GUESS=hppa1.1-stratus-vos + ;; + mc68*:A/UX:*:*) + GUESS=m68k-apple-aux$UNAME_RELEASE + ;; + news*:NEWS-OS:6*:*) + GUESS=mips-sony-newsos6 + ;; + R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) + if test -d /usr/nec; then + GUESS=mips-nec-sysv$UNAME_RELEASE + else + GUESS=mips-unknown-sysv$UNAME_RELEASE + fi + ;; + BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. + GUESS=powerpc-be-beos + ;; + BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. 
+ GUESS=powerpc-apple-beos + ;; + BePC:BeOS:*:*) # BeOS running on Intel PC compatible. + GUESS=i586-pc-beos + ;; + BePC:Haiku:*:*) # Haiku running on Intel PC compatible. + GUESS=i586-pc-haiku + ;; + x86_64:Haiku:*:*) + GUESS=x86_64-unknown-haiku + ;; + SX-4:SUPER-UX:*:*) + GUESS=sx4-nec-superux$UNAME_RELEASE + ;; + SX-5:SUPER-UX:*:*) + GUESS=sx5-nec-superux$UNAME_RELEASE + ;; + SX-6:SUPER-UX:*:*) + GUESS=sx6-nec-superux$UNAME_RELEASE + ;; + SX-7:SUPER-UX:*:*) + GUESS=sx7-nec-superux$UNAME_RELEASE + ;; + SX-8:SUPER-UX:*:*) + GUESS=sx8-nec-superux$UNAME_RELEASE + ;; + SX-8R:SUPER-UX:*:*) + GUESS=sx8r-nec-superux$UNAME_RELEASE + ;; + SX-ACE:SUPER-UX:*:*) + GUESS=sxace-nec-superux$UNAME_RELEASE + ;; + Power*:Rhapsody:*:*) + GUESS=powerpc-apple-rhapsody$UNAME_RELEASE + ;; + *:Rhapsody:*:*) + GUESS=$UNAME_MACHINE-apple-rhapsody$UNAME_RELEASE + ;; + arm64:Darwin:*:*) + GUESS=aarch64-apple-darwin$UNAME_RELEASE + ;; + *:Darwin:*:*) + UNAME_PROCESSOR=`uname -p` + case $UNAME_PROCESSOR in + unknown) UNAME_PROCESSOR=powerpc ;; + esac + if command -v xcode-select > /dev/null 2> /dev/null && \ + ! xcode-select --print-path > /dev/null 2> /dev/null ; then + # Avoid executing cc if there is no toolchain installed as + # cc will be a stub that puts up a graphical alert + # prompting the user to install developer tools. + CC_FOR_BUILD=no_compiler_found + else + set_cc_for_build + fi + if test "$CC_FOR_BUILD" != no_compiler_found; then + if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + case $UNAME_PROCESSOR in + i386) UNAME_PROCESSOR=x86_64 ;; + powerpc) UNAME_PROCESSOR=powerpc64 ;; + esac + fi + # On 10.4-10.6 one might compile for PowerPC via gcc -arch ppc + if (echo '#ifdef __POWERPC__'; echo IS_PPC; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_PPC >/dev/null + then + UNAME_PROCESSOR=powerpc + fi + elif test "$UNAME_PROCESSOR" = i386 ; then + # uname -m returns i386 or x86_64 + UNAME_PROCESSOR=$UNAME_MACHINE + fi + GUESS=$UNAME_PROCESSOR-apple-darwin$UNAME_RELEASE + ;; + *:procnto*:*:* | *:QNX:[0123456789]*:*) + UNAME_PROCESSOR=`uname -p` + if test "$UNAME_PROCESSOR" = x86; then + UNAME_PROCESSOR=i386 + UNAME_MACHINE=pc + fi + GUESS=$UNAME_PROCESSOR-$UNAME_MACHINE-nto-qnx$UNAME_RELEASE + ;; + *:QNX:*:4*) + GUESS=i386-pc-qnx + ;; + NEO-*:NONSTOP_KERNEL:*:*) + GUESS=neo-tandem-nsk$UNAME_RELEASE + ;; + NSE-*:NONSTOP_KERNEL:*:*) + GUESS=nse-tandem-nsk$UNAME_RELEASE + ;; + NSR-*:NONSTOP_KERNEL:*:*) + GUESS=nsr-tandem-nsk$UNAME_RELEASE + ;; + NSV-*:NONSTOP_KERNEL:*:*) + GUESS=nsv-tandem-nsk$UNAME_RELEASE + ;; + NSX-*:NONSTOP_KERNEL:*:*) + GUESS=nsx-tandem-nsk$UNAME_RELEASE + ;; + *:NonStop-UX:*:*) + GUESS=mips-compaq-nonstopux + ;; + BS2000:POSIX*:*:*) + GUESS=bs2000-siemens-sysv + ;; + DS/*:UNIX_System_V:*:*) + GUESS=$UNAME_MACHINE-$UNAME_SYSTEM-$UNAME_RELEASE + ;; + *:Plan9:*:*) + # "uname -m" is not consistent, so use $cputype instead. 386 + # is converted to i386 for consistency with other x86 + # operating systems. 
+ if test "${cputype-}" = 386; then + UNAME_MACHINE=i386 + elif test "x${cputype-}" != x; then + UNAME_MACHINE=$cputype + fi + GUESS=$UNAME_MACHINE-unknown-plan9 + ;; + *:TOPS-10:*:*) + GUESS=pdp10-unknown-tops10 + ;; + *:TENEX:*:*) + GUESS=pdp10-unknown-tenex + ;; + KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) + GUESS=pdp10-dec-tops20 + ;; + XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) + GUESS=pdp10-xkl-tops20 + ;; + *:TOPS-20:*:*) + GUESS=pdp10-unknown-tops20 + ;; + *:ITS:*:*) + GUESS=pdp10-unknown-its + ;; + SEI:*:*:SEIUX) + GUESS=mips-sei-seiux$UNAME_RELEASE + ;; + *:DragonFly:*:*) + DRAGONFLY_REL=`echo "$UNAME_RELEASE" | sed -e 's/[-(].*//'` + GUESS=$UNAME_MACHINE-unknown-dragonfly$DRAGONFLY_REL + ;; + *:*VMS:*:*) + UNAME_MACHINE=`(uname -p) 2>/dev/null` + case $UNAME_MACHINE in + A*) GUESS=alpha-dec-vms ;; + I*) GUESS=ia64-dec-vms ;; + V*) GUESS=vax-dec-vms ;; + esac ;; + *:XENIX:*:SysV) + GUESS=i386-pc-xenix + ;; + i*86:skyos:*:*) + SKYOS_REL=`echo "$UNAME_RELEASE" | sed -e 's/ .*$//'` + GUESS=$UNAME_MACHINE-pc-skyos$SKYOS_REL + ;; + i*86:rdos:*:*) + GUESS=$UNAME_MACHINE-pc-rdos + ;; + *:AROS:*:*) + GUESS=$UNAME_MACHINE-unknown-aros + ;; + x86_64:VMkernel:*:*) + GUESS=$UNAME_MACHINE-unknown-esx + ;; + amd64:Isilon\ OneFS:*:*) + GUESS=x86_64-unknown-onefs + ;; + *:Unleashed:*:*) + GUESS=$UNAME_MACHINE-unknown-unleashed$UNAME_RELEASE + ;; +esac + +# Do we have a guess based on uname results? +if test "x$GUESS" != x; then + echo "$GUESS" + exit +fi + +# No uname command or uname output not recognized. +set_cc_for_build +cat > "$dummy.c" < +#include +#endif +#if defined(ultrix) || defined(_ultrix) || defined(__ultrix) || defined(__ultrix__) +#if defined (vax) || defined (__vax) || defined (__vax__) || defined(mips) || defined(__mips) || defined(__mips__) || defined(MIPS) || defined(__MIPS__) +#include +#if defined(_SIZE_T_) || defined(SIGLOST) +#include +#endif +#endif +#endif +main () +{ +#if defined (sony) +#if defined (MIPSEB) + /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed, + I don't know.... */ + printf ("mips-sony-bsd\n"); exit (0); +#else +#include + printf ("m68k-sony-newsos%s\n", +#ifdef NEWSOS4 + "4" +#else + "" +#endif + ); exit (0); +#endif +#endif + +#if defined (NeXT) +#if !defined (__ARCHITECTURE__) +#define __ARCHITECTURE__ "m68k" +#endif + int version; + version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`; + if (version < 4) + printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version); + else + printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version); + exit (0); +#endif + +#if defined (MULTIMAX) || defined (n16) +#if defined (UMAXV) + printf ("ns32k-encore-sysv\n"); exit (0); +#else +#if defined (CMU) + printf ("ns32k-encore-mach\n"); exit (0); +#else + printf ("ns32k-encore-bsd\n"); exit (0); +#endif +#endif +#endif + +#if defined (__386BSD__) + printf ("i386-pc-bsd\n"); exit (0); +#endif + +#if defined (sequent) +#if defined (i386) + printf ("i386-sequent-dynix\n"); exit (0); +#endif +#if defined (ns32000) + printf ("ns32k-sequent-dynix\n"); exit (0); +#endif +#endif + +#if defined (_SEQUENT_) + struct utsname un; + + uname(&un); + if (strncmp(un.version, "V2", 2) == 0) { + printf ("i386-sequent-ptx2\n"); exit (0); + } + if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? 
*/ + printf ("i386-sequent-ptx1\n"); exit (0); + } + printf ("i386-sequent-ptx\n"); exit (0); +#endif + +#if defined (vax) +#if !defined (ultrix) +#include +#if defined (BSD) +#if BSD == 43 + printf ("vax-dec-bsd4.3\n"); exit (0); +#else +#if BSD == 199006 + printf ("vax-dec-bsd4.3reno\n"); exit (0); +#else + printf ("vax-dec-bsd\n"); exit (0); +#endif +#endif +#else + printf ("vax-dec-bsd\n"); exit (0); +#endif +#else +#if defined(_SIZE_T_) || defined(SIGLOST) + struct utsname un; + uname (&un); + printf ("vax-dec-ultrix%s\n", un.release); exit (0); +#else + printf ("vax-dec-ultrix\n"); exit (0); +#endif +#endif +#endif +#if defined(ultrix) || defined(_ultrix) || defined(__ultrix) || defined(__ultrix__) +#if defined(mips) || defined(__mips) || defined(__mips__) || defined(MIPS) || defined(__MIPS__) +#if defined(_SIZE_T_) || defined(SIGLOST) + struct utsname *un; + uname (&un); + printf ("mips-dec-ultrix%s\n", un.release); exit (0); +#else + printf ("mips-dec-ultrix\n"); exit (0); +#endif +#endif +#endif + +#if defined (alliant) && defined (i860) + printf ("i860-alliant-bsd\n"); exit (0); +#endif + + exit (1); +} +EOF + +$CC_FOR_BUILD -o "$dummy" "$dummy.c" 2>/dev/null && SYSTEM_NAME=`"$dummy"` && + { echo "$SYSTEM_NAME"; exit; } + +# Apollos put the system type in the environment. +test -d /usr/apollo && { echo "$ISP-apollo-$SYSTYPE"; exit; } + +echo "$0: unable to guess system type" >&2 + +case $UNAME_MACHINE:$UNAME_SYSTEM in + mips:Linux | mips64:Linux) + # If we got here on MIPS GNU/Linux, output extra information. + cat >&2 <&2 <&2 </dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null` + +hostinfo = `(hostinfo) 2>/dev/null` +/bin/universe = `(/bin/universe) 2>/dev/null` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` +/bin/arch = `(/bin/arch) 2>/dev/null` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` + +UNAME_MACHINE = "$UNAME_MACHINE" +UNAME_RELEASE = "$UNAME_RELEASE" +UNAME_SYSTEM = "$UNAME_SYSTEM" +UNAME_VERSION = "$UNAME_VERSION" +EOF +fi + +exit 1 + +# Local variables: +# eval: (add-hook 'before-save-hook 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/pkg/enet/enet/config.log b/pkg/enet/enet/config.log new file mode 100644 index 000000000..3dda27022 --- /dev/null +++ b/pkg/enet/enet/config.log @@ -0,0 +1,861 @@ +This file contains any messages produced by compilers while +running configure, to aid debugging if configure makes a mistake. + +It was created by libenet configure 1.3.18, which was +generated by GNU Autoconf 2.71. Invocation command line was + + $ ./configure + +## --------- ## +## Platform. ## +## --------- ## + +hostname = Air-Caleb-2 +uname -m = arm64 +uname -r = 22.1.0 +uname -s = Darwin +uname -v = Darwin Kernel Version 22.1.0: Sun Oct 9 20:15:52 PDT 2022; root:xnu-8792.41.9~2/RELEASE_ARM64_T8112 + +/usr/bin/uname -p = arm +/bin/uname -X = unknown + +/bin/arch = unknown +/usr/bin/arch -k = unknown +/usr/convex/getsysinfo = unknown +/usr/bin/hostinfo = Mach kernel version: + Darwin Kernel Version 22.1.0: Sun Oct 9 20:15:52 PDT 2022; root:xnu-8792.41.9~2/RELEASE_ARM64_T8112 +Kernel configured for up to 8 processors. +8 processors are physically available. 
+8 processors are logically available. +Processor type: arm64e (ARM64E) +Processors active: 0 1 2 3 4 5 6 7 +Primary memory available: 8.00 gigabytes +Default processor set: 381 tasks, 2569 threads, 8 processors +Load average: 2.35, Mach factor: 5.63 +/bin/machine = unknown +/usr/bin/oslevel = unknown +/bin/universe = unknown + +PATH: /opt/homebrew/opt/node@20/bin/ +PATH: /opt/homebrew/opt/ruby/bin/ +PATH: /Users/cfoust/.cargo/bin/ +PATH: /opt/homebrew/Cellar/janet/1.27.0/bin/ +PATH: /Users/cfoust/go/bin/ +PATH: /opt/local/bin/ +PATH: /opt/local/sbin/ +PATH: /opt/homebrew/bin/ +PATH: /opt/homebrew/sbin/ +PATH: /usr/local/bin/ +PATH: /System/Cryptexes/App/usr/bin/ +PATH: /usr/bin/ +PATH: /bin/ +PATH: /usr/sbin/ +PATH: /sbin/ +PATH: /Library/TeX/texbin/ +PATH: /usr/local/go/bin/ +PATH: /Library/Apple/usr/bin/ +PATH: /Applications/kitty.app/Contents/MacOS/ +PATH: /opt/homebrew/bin/ +PATH: /Users/cfoust/.fzf/bin/ +PATH: /Users/cfoust/cawnfig/bin/ +PATH: /Users/cfoust/cawnfig/bin/ +PATH: /Users/cfoust/cawnfig/bin/ + + +## ----------- ## +## Core tests. ## +## ----------- ## + +configure:2469: looking for aux files: config.guess config.sub ltmain.sh compile missing install-sh +configure:2482: trying ./ +configure:2511: ./config.guess found +configure:2511: ./config.sub found +configure:2511: ./ltmain.sh found +configure:2511: ./compile found +configure:2511: ./missing found +configure:2493: ./install-sh found +configure:2641: checking for a BSD-compatible install +configure:2714: result: /usr/bin/install -c +configure:2725: checking whether build environment is sane +configure:2780: result: yes +configure:2939: checking for a race-free mkdir -p +configure:2983: result: ./install-sh -c -d +configure:2990: checking for gawk +configure:3025: result: no +configure:2990: checking for mawk +configure:3025: result: no +configure:2990: checking for nawk +configure:3025: result: no +configure:2990: checking for awk +configure:3011: found /usr/bin/awk +configure:3022: result: awk +configure:3033: checking whether make sets $(MAKE) +configure:3056: result: yes +configure:3086: checking whether make supports nested variables +configure:3104: result: yes +configure:3305: checking for gcc +configure:3326: found /usr/bin/gcc +configure:3337: result: gcc +configure:3690: checking for C compiler version +configure:3699: gcc --version >&5 +Apple clang version 14.0.3 (clang-1403.0.22.14.1) +Target: arm64-apple-darwin22.1.0 +Thread model: posix +InstalledDir: /Library/Developer/CommandLineTools/usr/bin +configure:3710: $? = 0 +configure:3699: gcc -v >&5 +Apple clang version 14.0.3 (clang-1403.0.22.14.1) +Target: arm64-apple-darwin22.1.0 +Thread model: posix +InstalledDir: /Library/Developer/CommandLineTools/usr/bin +configure:3710: $? = 0 +configure:3699: gcc -V >&5 +clang: error: argument to '-V' is missing (expected 1 value) +clang: error: no input files +configure:3710: $? = 1 +configure:3699: gcc -qversion >&5 +clang: error: unknown argument '-qversion'; did you mean '--version'? +clang: error: no input files +configure:3710: $? = 1 +configure:3699: gcc -version >&5 +clang: error: unknown argument '-version'; did you mean '--version'? +clang: error: no input files +configure:3710: $? = 1 +configure:3730: checking whether the C compiler works +configure:3752: gcc conftest.c >&5 +configure:3756: $? 
= 0 +configure:3806: result: yes +configure:3809: checking for C compiler default output file name +configure:3811: result: a.out +configure:3817: checking for suffix of executables +configure:3824: gcc -o conftest conftest.c >&5 +configure:3828: $? = 0 +configure:3851: result: +configure:3873: checking whether we are cross compiling +configure:3881: gcc -o conftest conftest.c >&5 +configure:3885: $? = 0 +configure:3892: ./conftest +configure:3896: $? = 0 +configure:3911: result: no +configure:3916: checking for suffix of object files +configure:3939: gcc -c conftest.c >&5 +configure:3943: $? = 0 +configure:3965: result: o +configure:3969: checking whether the compiler supports GNU C +configure:3989: gcc -c conftest.c >&5 +configure:3989: $? = 0 +configure:3999: result: yes +configure:4010: checking whether gcc accepts -g +configure:4031: gcc -c -g conftest.c >&5 +configure:4031: $? = 0 +configure:4075: result: yes +configure:4095: checking for gcc option to enable C11 features +configure:4110: gcc -c -g -O2 conftest.c >&5 +conftest.c:25:14: warning: a function declaration without a prototype is deprecated in all versions of C and is not supported in C2x [-Wdeprecated-non-prototype] +static char *e (p, i) + ^ +1 warning generated. +configure:4110: $? = 0 +configure:4128: result: none needed +configure:4244: checking whether gcc understands -c and -o together +configure:4267: gcc -c conftest.c -o conftest2.o +configure:4270: $? = 0 +configure:4267: gcc -c conftest.c -o conftest2.o +configure:4270: $? = 0 +configure:4282: result: yes +configure:4302: checking whether make supports the include directive +configure:4317: make -f confmf.GNU && cat confinc.out +this is the am__doit target +configure:4320: $? = 0 +configure:4339: result: yes (GNU style) +configure:4365: checking dependency style of gcc +configure:4477: result: gcc3 +configure:4524: checking build system type +configure:4539: result: aarch64-apple-darwin22.1.0 +configure:4559: checking host system type +configure:4573: result: aarch64-apple-darwin22.1.0 +configure:4614: checking how to print strings +configure:4641: result: printf +configure:4662: checking for a sed that does not truncate output +configure:4732: result: /usr/bin/sed +configure:4750: checking for grep that handles long lines and -e +configure:4814: result: /usr/bin/grep +configure:4819: checking for egrep +configure:4887: result: /usr/bin/grep -E +configure:4892: checking for fgrep +configure:4960: result: /usr/bin/grep -F +configure:4996: checking for ld used by gcc +configure:5064: result: /Library/Developer/CommandLineTools/usr/bin/ld +configure:5071: checking if the linker (/Library/Developer/CommandLineTools/usr/bin/ld) is GNU ld +configure:5087: result: no +configure:5099: checking for BSD- or MS-compatible name lister (nm) +configure:5154: result: /usr/bin/nm -B +configure:5294: checking the name lister (/usr/bin/nm -B) interface +configure:5302: gcc -c -g -O2 conftest.c >&5 +configure:5305: /usr/bin/nm -B "conftest.o" +configure:5308: output +0000000000000230 S _some_variable +0000000000000000 t ltmp0 +0000000000000230 s ltmp1 +configure:5315: result: BSD nm +configure:5318: checking whether ln -s works +configure:5322: result: yes +configure:5330: checking the maximum length of command line arguments +configure:5462: result: 786432 +configure:5510: checking how to convert aarch64-apple-darwin22.1.0 file names to aarch64-apple-darwin22.1.0 format +configure:5551: result: func_convert_file_noop +configure:5558: checking how to convert 
aarch64-apple-darwin22.1.0 file names to toolchain format +configure:5579: result: func_convert_file_noop +configure:5586: checking for /Library/Developer/CommandLineTools/usr/bin/ld option to reload object files +configure:5594: result: -r +configure:5673: checking for file +configure:5694: found /usr/bin/file +configure:5705: result: file +configure:5781: checking for objdump +configure:5802: found /usr/bin/objdump +configure:5813: result: objdump +configure:5845: checking how to recognize dependent libraries +configure:6046: result: pass_all +configure:6136: checking for dlltool +configure:6171: result: no +configure:6201: checking how to associate runtime and link libraries +configure:6229: result: printf %s\n +configure:6295: checking for ar +configure:6316: found /usr/bin/ar +configure:6327: result: ar +configure:6380: checking for archiver @FILE support +configure:6398: gcc -c -g -O2 conftest.c >&5 +configure:6398: $? = 0 +configure:6402: ar cr libconftest.a @conftest.lst >&5 +ar: @conftest.lst: No such file or directory +configure:6405: $? = 1 +configure:6425: result: no +configure:6488: checking for strip +configure:6509: found /usr/bin/strip +configure:6520: result: strip +configure:6597: checking for ranlib +configure:6618: found /usr/bin/ranlib +configure:6629: result: ranlib +configure:6731: checking command to parse /usr/bin/nm -B output from gcc object +configure:6885: gcc -c -g -O2 conftest.c >&5 +configure:6888: $? = 0 +configure:6892: /usr/bin/nm -B conftest.o \| /usr/bin/sed -n -e 's/^.*[ ]\([ABCDGIRSTW][ABCDGIRSTW]*\)[ ][ ]*\([_A-Za-z][_A-Za-z0-9]*\)$/\1 \2 \2/p' | /usr/bin/sed '/ __gnu_lto/d' \> conftest.nm +configure:6895: $? = 0 +cannot find nm_test_var in conftest.nm +configure:6885: gcc -c -g -O2 conftest.c >&5 +configure:6888: $? = 0 +configure:6892: /usr/bin/nm -B conftest.o \| /usr/bin/sed -n -e 's/^.*[ ]\([ABCDGIRSTW][ABCDGIRSTW]*\)[ ][ ]*_\([_A-Za-z][_A-Za-z0-9]*\)$/\1 _\2 \2/p' | /usr/bin/sed '/ __gnu_lto/d' \> conftest.nm +configure:6895: $? = 0 +configure:6961: gcc -o conftest -g -O2 conftest.c conftstm.o >&5 +warning: (arm64) could not find object file symbol for symbol _main +configure:6964: $? = 0 +configure:7002: result: ok +configure:7049: checking for sysroot +configure:7080: result: no +configure:7087: checking for a working dd +configure:7131: result: /bin/dd +configure:7135: checking how to truncate binary pipes +configure:7151: result: /bin/dd bs=4096 count=1 +configure:7488: checking for mt +configure:7523: result: no +configure:7543: checking if : is a manifest tool +configure:7550: : '-?' +configure:7558: result: no +configure:7619: checking for dsymutil +configure:7640: found /usr/bin/dsymutil +configure:7651: result: dsymutil +configure:7721: checking for nmedit +configure:7742: found /usr/bin/nmedit +configure:7753: result: nmedit +configure:7823: checking for lipo +configure:7844: found /usr/bin/lipo +configure:7855: result: lipo +configure:7925: checking for otool +configure:7946: found /usr/bin/otool +configure:7957: result: otool +configure:8027: checking for otool64 +configure:8062: result: no +configure:8107: checking for -single_module linker flag +gcc -g -O2 -o libconftest.dylib -dynamiclib -Wl,-single_module conftest.c +configure:8141: result: yes +configure:8144: checking for -exported_symbols_list linker flag +configure:8165: gcc -o conftest -g -O2 -Wl,-exported_symbols_list,conftest.sym conftest.c >&5 +configure:8165: $? 
= 0 +configure:8176: result: yes +configure:8179: checking for -force_load linker flag +gcc -g -O2 -c -o conftest.o conftest.c +ar cr libconftest.a conftest.o +ranlib libconftest.a +gcc -g -O2 -o conftest conftest.c -Wl,-force_load,./libconftest.a +configure:8212: result: yes +configure:8283: checking for stdio.h +configure:8283: gcc -c -g -O2 conftest.c >&5 +configure:8283: $? = 0 +configure:8283: result: yes +configure:8283: checking for stdlib.h +configure:8283: gcc -c -g -O2 conftest.c >&5 +configure:8283: $? = 0 +configure:8283: result: yes +configure:8283: checking for string.h +configure:8283: gcc -c -g -O2 conftest.c >&5 +configure:8283: $? = 0 +configure:8283: result: yes +configure:8283: checking for inttypes.h +configure:8283: gcc -c -g -O2 conftest.c >&5 +configure:8283: $? = 0 +configure:8283: result: yes +configure:8283: checking for stdint.h +configure:8283: gcc -c -g -O2 conftest.c >&5 +configure:8283: $? = 0 +configure:8283: result: yes +configure:8283: checking for strings.h +configure:8283: gcc -c -g -O2 conftest.c >&5 +configure:8283: $? = 0 +configure:8283: result: yes +configure:8283: checking for sys/stat.h +configure:8283: gcc -c -g -O2 conftest.c >&5 +configure:8283: $? = 0 +configure:8283: result: yes +configure:8283: checking for sys/types.h +configure:8283: gcc -c -g -O2 conftest.c >&5 +configure:8283: $? = 0 +configure:8283: result: yes +configure:8283: checking for unistd.h +configure:8283: gcc -c -g -O2 conftest.c >&5 +configure:8283: $? = 0 +configure:8283: result: yes +configure:8308: checking for dlfcn.h +configure:8308: gcc -c -g -O2 conftest.c >&5 +configure:8308: $? = 0 +configure:8308: result: yes +configure:8567: checking for objdir +configure:8583: result: .libs +configure:8847: checking if gcc supports -fno-rtti -fno-exceptions +configure:8866: gcc -c -g -O2 -fno-rtti -fno-exceptions conftest.c >&5 +configure:8870: $? = 0 +configure:8883: result: yes +configure:9241: checking for gcc option to produce PIC +configure:9249: result: -fno-common -DPIC +configure:9257: checking if gcc PIC flag -fno-common -DPIC works +configure:9276: gcc -c -g -O2 -fno-common -DPIC -DPIC conftest.c >&5 +configure:9280: $? = 0 +configure:9293: result: yes +configure:9322: checking if gcc static flag -static works +configure:9351: result: no +configure:9366: checking if gcc supports -c -o file.o +configure:9388: gcc -c -g -O2 -o out/conftest2.o conftest.c >&5 +configure:9392: $? = 0 +configure:9414: result: yes +configure:9422: checking if gcc supports -c -o file.o +configure:9470: result: yes +configure:9503: checking whether the gcc linker (/Library/Developer/CommandLineTools/usr/bin/ld) supports shared libraries +configure:10771: result: yes +configure:11012: checking dynamic linker characteristics +configure:11833: result: darwin22.1.0 dyld +configure:11955: checking how to hardcode library paths into programs +configure:11980: result: immediate +configure:12532: checking whether stripping libraries is possible +configure:12549: result: yes +configure:12583: checking if libtool supports shared libraries +configure:12585: result: yes +configure:12588: checking whether to build shared libraries +configure:12613: result: yes +configure:12616: checking whether to build static libraries +configure:12620: result: yes +configure:12658: checking for getaddrinfo +configure:12658: gcc -o conftest -g -O2 conftest.c >&5 +configure:12658: $? 
= 0 +configure:12658: result: yes +configure:12665: checking for getnameinfo +configure:12665: gcc -o conftest -g -O2 conftest.c >&5 +configure:12665: $? = 0 +configure:12665: result: yes +configure:12672: checking for gethostbyaddr_r +configure:12672: gcc -o conftest -g -O2 conftest.c >&5 +Undefined symbols for architecture arm64: + "_gethostbyaddr_r", referenced from: + _main in conftest-f9ae81.o +ld: symbol(s) not found for architecture arm64 +clang: error: linker command failed with exit code 1 (use -v to see invocation) +configure:12672: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libenet" +| #define PACKAGE_TARNAME "libenet" +| #define PACKAGE_VERSION "1.3.18" +| #define PACKAGE_STRING "libenet 1.3.18" +| #define PACKAGE_BUGREPORT "" +| #define PACKAGE_URL "" +| #define PACKAGE "libenet" +| #define VERSION "1.3.18" +| #define HAVE_STDIO_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_UNISTD_H 1 +| #define STDC_HEADERS 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| #define HAS_GETADDRINFO 1 +| #define HAS_GETNAMEINFO 1 +| /* end confdefs.h. */ +| /* Define gethostbyaddr_r to an innocuous variant, in case declares gethostbyaddr_r. +| For example, HP-UX 11i declares gettimeofday. */ +| #define gethostbyaddr_r innocuous_gethostbyaddr_r +| +| /* System header to define __stub macros and hopefully few prototypes, +| which can conflict with char gethostbyaddr_r (); below. */ +| +| #include +| #undef gethostbyaddr_r +| +| /* Override any GCC internal prototype to avoid an error. +| Use char because int might match the return type of a GCC +| builtin and then its argument prototype would still apply. */ +| #ifdef __cplusplus +| extern "C" +| #endif +| char gethostbyaddr_r (); +| /* The GNU C library defines this for functions which it implements +| to always fail with ENOSYS. Some functions are actually named +| something starting with __ and the normal name is an alias. */ +| #if defined __stub_gethostbyaddr_r || defined __stub___gethostbyaddr_r +| choke me +| #endif +| +| int +| main (void) +| { +| return gethostbyaddr_r (); +| ; +| return 0; +| } +configure:12672: result: no +configure:12679: checking for gethostbyname_r +configure:12679: gcc -o conftest -g -O2 conftest.c >&5 +Undefined symbols for architecture arm64: + "_gethostbyname_r", referenced from: + _main in conftest-95f779.o +ld: symbol(s) not found for architecture arm64 +clang: error: linker command failed with exit code 1 (use -v to see invocation) +configure:12679: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libenet" +| #define PACKAGE_TARNAME "libenet" +| #define PACKAGE_VERSION "1.3.18" +| #define PACKAGE_STRING "libenet 1.3.18" +| #define PACKAGE_BUGREPORT "" +| #define PACKAGE_URL "" +| #define PACKAGE "libenet" +| #define VERSION "1.3.18" +| #define HAVE_STDIO_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_UNISTD_H 1 +| #define STDC_HEADERS 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| #define HAS_GETADDRINFO 1 +| #define HAS_GETNAMEINFO 1 +| /* end confdefs.h. */ +| /* Define gethostbyname_r to an innocuous variant, in case declares gethostbyname_r. 
+| For example, HP-UX 11i declares gettimeofday. */ +| #define gethostbyname_r innocuous_gethostbyname_r +| +| /* System header to define __stub macros and hopefully few prototypes, +| which can conflict with char gethostbyname_r (); below. */ +| +| #include +| #undef gethostbyname_r +| +| /* Override any GCC internal prototype to avoid an error. +| Use char because int might match the return type of a GCC +| builtin and then its argument prototype would still apply. */ +| #ifdef __cplusplus +| extern "C" +| #endif +| char gethostbyname_r (); +| /* The GNU C library defines this for functions which it implements +| to always fail with ENOSYS. Some functions are actually named +| something starting with __ and the normal name is an alias. */ +| #if defined __stub_gethostbyname_r || defined __stub___gethostbyname_r +| choke me +| #endif +| +| int +| main (void) +| { +| return gethostbyname_r (); +| ; +| return 0; +| } +configure:12679: result: no +configure:12686: checking for poll +configure:12686: gcc -o conftest -g -O2 conftest.c >&5 +configure:12686: $? = 0 +configure:12686: result: yes +configure:12693: checking for fcntl +configure:12693: gcc -o conftest -g -O2 conftest.c >&5 +configure:12693: $? = 0 +configure:12693: result: yes +configure:12700: checking for inet_pton +configure:12700: gcc -o conftest -g -O2 conftest.c >&5 +configure:12700: $? = 0 +configure:12700: result: yes +configure:12707: checking for inet_ntop +configure:12707: gcc -o conftest -g -O2 conftest.c >&5 +configure:12707: $? = 0 +configure:12707: result: yes +configure:12715: checking for struct msghdr.msg_flags +configure:12715: gcc -c -g -O2 conftest.c >&5 +configure:12715: $? = 0 +configure:12715: result: yes +configure:12724: checking for socklen_t +configure:12724: gcc -c -g -O2 conftest.c >&5 +configure:12724: $? = 0 +configure:12724: gcc -c -g -O2 conftest.c >&5 +conftest.c:37:24: error: expected expression +if (sizeof ((socklen_t))) + ^ +1 error generated. +configure:12724: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libenet" +| #define PACKAGE_TARNAME "libenet" +| #define PACKAGE_VERSION "1.3.18" +| #define PACKAGE_STRING "libenet 1.3.18" +| #define PACKAGE_BUGREPORT "" +| #define PACKAGE_URL "" +| #define PACKAGE "libenet" +| #define VERSION "1.3.18" +| #define HAVE_STDIO_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_UNISTD_H 1 +| #define STDC_HEADERS 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| #define HAS_GETADDRINFO 1 +| #define HAS_GETNAMEINFO 1 +| #define HAS_POLL 1 +| #define HAS_FCNTL 1 +| #define HAS_INET_PTON 1 +| #define HAS_INET_NTOP 1 +| #define HAS_MSGHDR_FLAGS 1 +| /* end confdefs.h. */ +| #include +| #include +| +| +| int +| main (void) +| { +| if (sizeof ((socklen_t))) +| return 0; +| ; +| return 0; +| } +configure:12724: result: yes +configure:12882: checking that generated files are newer than configure +configure:12888: result: done +configure:12911: creating ./config.status + +## ---------------------- ## +## Running config.status. ## +## ---------------------- ## + +This file was extended by libenet config.status 1.3.18, which was +generated by GNU Autoconf 2.71. 
Invocation command line was + + CONFIG_FILES = + CONFIG_HEADERS = + CONFIG_LINKS = + CONFIG_COMMANDS = + $ ./config.status + +on Air-Caleb-2 + +config.status:1003: creating Makefile +config.status:1003: creating libenet.pc +config.status:1175: executing depfiles commands +config.status:1252: cd . && sed -e '/# am--include-marker/d' Makefile | make -f - am--depfiles +config.status:1257: $? = 0 +config.status:1175: executing libtool commands + +## ---------------- ## +## Cache variables. ## +## ---------------- ## + +ac_cv_build=aarch64-apple-darwin22.1.0 +ac_cv_c_compiler_gnu=yes +ac_cv_env_CC_set= +ac_cv_env_CC_value= +ac_cv_env_CFLAGS_set= +ac_cv_env_CFLAGS_value= +ac_cv_env_CPPFLAGS_set= +ac_cv_env_CPPFLAGS_value= +ac_cv_env_LDFLAGS_set= +ac_cv_env_LDFLAGS_value= +ac_cv_env_LIBS_set= +ac_cv_env_LIBS_value= +ac_cv_env_LT_SYS_LIBRARY_PATH_set= +ac_cv_env_LT_SYS_LIBRARY_PATH_value= +ac_cv_env_build_alias_set= +ac_cv_env_build_alias_value= +ac_cv_env_host_alias_set= +ac_cv_env_host_alias_value= +ac_cv_env_target_alias_set= +ac_cv_env_target_alias_value= +ac_cv_func_fcntl=yes +ac_cv_func_getaddrinfo=yes +ac_cv_func_gethostbyaddr_r=no +ac_cv_func_gethostbyname_r=no +ac_cv_func_getnameinfo=yes +ac_cv_func_inet_ntop=yes +ac_cv_func_inet_pton=yes +ac_cv_func_poll=yes +ac_cv_header_dlfcn_h=yes +ac_cv_header_inttypes_h=yes +ac_cv_header_stdint_h=yes +ac_cv_header_stdio_h=yes +ac_cv_header_stdlib_h=yes +ac_cv_header_string_h=yes +ac_cv_header_strings_h=yes +ac_cv_header_sys_stat_h=yes +ac_cv_header_sys_types_h=yes +ac_cv_header_unistd_h=yes +ac_cv_host=aarch64-apple-darwin22.1.0 +ac_cv_member_struct_msghdr_msg_flags=yes +ac_cv_objext=o +ac_cv_path_EGREP='/usr/bin/grep -E' +ac_cv_path_FGREP='/usr/bin/grep -F' +ac_cv_path_GREP=/usr/bin/grep +ac_cv_path_SED=/usr/bin/sed +ac_cv_path_install='/usr/bin/install -c' +ac_cv_path_lt_DD=/bin/dd +ac_cv_prog_AWK=awk +ac_cv_prog_ac_ct_AR=ar +ac_cv_prog_ac_ct_CC=gcc +ac_cv_prog_ac_ct_DSYMUTIL=dsymutil +ac_cv_prog_ac_ct_FILECMD=file +ac_cv_prog_ac_ct_LIPO=lipo +ac_cv_prog_ac_ct_NMEDIT=nmedit +ac_cv_prog_ac_ct_OBJDUMP=objdump +ac_cv_prog_ac_ct_OTOOL=otool +ac_cv_prog_ac_ct_RANLIB=ranlib +ac_cv_prog_ac_ct_STRIP=strip +ac_cv_prog_cc_c11= +ac_cv_prog_cc_g=yes +ac_cv_prog_cc_stdc= +ac_cv_prog_make_make_set=yes +ac_cv_type_socklen_t=yes +am_cv_CC_dependencies_compiler_type=gcc3 +am_cv_make_support_nested_variables=yes +am_cv_prog_cc_c_o=yes +lt_cv_apple_cc_single_mod=yes +lt_cv_ar_at_file=no +lt_cv_deplibs_check_method=pass_all +lt_cv_file_magic_cmd='$MAGIC_CMD' +lt_cv_file_magic_test_file= +lt_cv_ld_exported_symbols_list=yes +lt_cv_ld_force_load=yes +lt_cv_ld_reload_flag=-r +lt_cv_nm_interface='BSD nm' +lt_cv_objdir=.libs +lt_cv_path_LD=/Library/Developer/CommandLineTools/usr/bin/ld +lt_cv_path_NM='/usr/bin/nm -B' +lt_cv_path_mainfest_tool=no +lt_cv_prog_compiler_c_o=yes +lt_cv_prog_compiler_pic='-fno-common -DPIC' +lt_cv_prog_compiler_pic_works=yes +lt_cv_prog_compiler_rtti_exceptions=yes +lt_cv_prog_compiler_static_works=no +lt_cv_prog_gnu_ld=no +lt_cv_sharedlib_from_linklib_cmd='printf %s\n' +lt_cv_sys_global_symbol_pipe='/usr/bin/sed -n -e '\''s/^.*[ ]\([ABCDGIRSTW][ABCDGIRSTW]*\)[ ][ ]*_\([_A-Za-z][_A-Za-z0-9]*\)$/\1 _\2 \2/p'\'' | /usr/bin/sed '\''/ __gnu_lto/d'\''' +lt_cv_sys_global_symbol_to_c_name_address='/usr/bin/sed -n -e '\''s/^: \(.*\) .*$/ {"\1", (void *) 0},/p'\'' -e '\''s/^[ABCDGIRSTW][ABCDGIRSTW]* .* \(.*\)$/ {"\1", (void *) \&\1},/p'\''' +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='/usr/bin/sed -n -e '\''s/^: \(.*\) .*$/ {"\1", (void *) 
0},/p'\'' -e '\''s/^[ABCDGIRSTW][ABCDGIRSTW]* .* \(lib.*\)$/ {"\1", (void *) \&\1},/p'\'' -e '\''s/^[ABCDGIRSTW][ABCDGIRSTW]* .* \(.*\)$/ {"lib\1", (void *) \&\1},/p'\''' +lt_cv_sys_global_symbol_to_cdecl='/usr/bin/sed -n -e '\''s/^T .* \(.*\)$/extern int \1();/p'\'' -e '\''s/^[ABCDGIRSTW][ABCDGIRSTW]* .* \(.*\)$/extern char \1;/p'\''' +lt_cv_sys_global_symbol_to_import= +lt_cv_sys_max_cmd_len=786432 +lt_cv_to_host_file_cmd=func_convert_file_noop +lt_cv_to_tool_file_cmd=func_convert_file_noop +lt_cv_truncate_bin='/bin/dd bs=4096 count=1' + +## ----------------- ## +## Output variables. ## +## ----------------- ## + +ACLOCAL='${SHELL} '\''/Users/cfoust/Developer/cfoust/sour/pkg/enet/enet/missing'\'' aclocal-1.16' +AMDEPBACKSLASH='\' +AMDEP_FALSE='#' +AMDEP_TRUE='' +AMTAR='$${TAR-tar}' +AM_BACKSLASH='\' +AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' +AM_DEFAULT_VERBOSITY='1' +AM_V='$(V)' +AR='ar' +AUTOCONF='${SHELL} '\''/Users/cfoust/Developer/cfoust/sour/pkg/enet/enet/missing'\'' autoconf' +AUTOHEADER='${SHELL} '\''/Users/cfoust/Developer/cfoust/sour/pkg/enet/enet/missing'\'' autoheader' +AUTOMAKE='${SHELL} '\''/Users/cfoust/Developer/cfoust/sour/pkg/enet/enet/missing'\'' automake-1.16' +AWK='awk' +CC='gcc' +CCDEPMODE='depmode=gcc3' +CFLAGS='-g -O2' +CPPFLAGS='' +CSCOPE='cscope' +CTAGS='ctags' +CYGPATH_W='echo' +DEFS='-DPACKAGE_NAME=\"libenet\" -DPACKAGE_TARNAME=\"libenet\" -DPACKAGE_VERSION=\"1.3.18\" -DPACKAGE_STRING=\"libenet\ 1.3.18\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DPACKAGE=\"libenet\" -DVERSION=\"1.3.18\" -DHAVE_STDIO_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_STRINGS_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_UNISTD_H=1 -DSTDC_HEADERS=1 -DHAVE_DLFCN_H=1 -DLT_OBJDIR=\".libs/\" -DHAS_GETADDRINFO=1 -DHAS_GETNAMEINFO=1 -DHAS_POLL=1 -DHAS_FCNTL=1 -DHAS_INET_PTON=1 -DHAS_INET_NTOP=1 -DHAS_MSGHDR_FLAGS=1 -DHAS_SOCKLEN_T=1' +DEPDIR='.deps' +DLLTOOL='false' +DSYMUTIL='dsymutil' +DUMPBIN='' +ECHO_C='\c' +ECHO_N='' +ECHO_T='' +EGREP='/usr/bin/grep -E' +ETAGS='etags' +EXEEXT='' +FGREP='/usr/bin/grep -F' +FILECMD='file' +GREP='/usr/bin/grep' +INSTALL_DATA='${INSTALL} -m 644' +INSTALL_PROGRAM='${INSTALL}' +INSTALL_SCRIPT='${INSTALL}' +INSTALL_STRIP_PROGRAM='$(install_sh) -c -s' +LD='/Library/Developer/CommandLineTools/usr/bin/ld' +LDFLAGS='' +LIBOBJS='' +LIBS='' +LIBTOOL='$(SHELL) $(top_builddir)/libtool' +LIPO='lipo' +LN_S='ln -s' +LTLIBOBJS='' +LT_SYS_LIBRARY_PATH='' +MAKEINFO='${SHELL} '\''/Users/cfoust/Developer/cfoust/sour/pkg/enet/enet/missing'\'' makeinfo' +MANIFEST_TOOL=':' +MKDIR_P='./install-sh -c -d' +NM='/usr/bin/nm -B' +NMEDIT='nmedit' +OBJDUMP='objdump' +OBJEXT='o' +OTOOL64=':' +OTOOL='otool' +PACKAGE='libenet' +PACKAGE_BUGREPORT='' +PACKAGE_NAME='libenet' +PACKAGE_STRING='libenet 1.3.18' +PACKAGE_TARNAME='libenet' +PACKAGE_URL='' +PACKAGE_VERSION='1.3.18' +PATH_SEPARATOR=':' +RANLIB='ranlib' +SED='/usr/bin/sed' +SET_MAKE='' +SHELL='/bin/sh' +STRIP='strip' +VERSION='1.3.18' +ac_ct_AR='ar' +ac_ct_CC='gcc' +ac_ct_DUMPBIN='' +am__EXEEXT_FALSE='' +am__EXEEXT_TRUE='#' +am__fastdepCC_FALSE='#' +am__fastdepCC_TRUE='' +am__include='include' +am__isrc='' +am__leading_dot='.' 
+am__nodep='_no' +am__quote='' +am__tar='$${TAR-tar} chof - "$$tardir"' +am__untar='$${TAR-tar} xf -' +bindir='${exec_prefix}/bin' +build='aarch64-apple-darwin22.1.0' +build_alias='' +build_cpu='aarch64' +build_os='darwin22.1.0' +build_vendor='apple' +datadir='${datarootdir}' +datarootdir='${prefix}/share' +docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' +dvidir='${docdir}' +exec_prefix='${prefix}' +host='aarch64-apple-darwin22.1.0' +host_alias='' +host_cpu='aarch64' +host_os='darwin22.1.0' +host_vendor='apple' +htmldir='${docdir}' +includedir='${prefix}/include' +infodir='${datarootdir}/info' +install_sh='${SHELL} /Users/cfoust/Developer/cfoust/sour/pkg/enet/enet/install-sh' +libdir='${exec_prefix}/lib' +libexecdir='${exec_prefix}/libexec' +localedir='${datarootdir}/locale' +localstatedir='${prefix}/var' +mandir='${datarootdir}/man' +mkdir_p='$(MKDIR_P)' +oldincludedir='/usr/include' +pdfdir='${docdir}' +prefix='/usr/local' +program_transform_name='s,x,x,' +psdir='${docdir}' +runstatedir='${localstatedir}/run' +sbindir='${exec_prefix}/sbin' +sharedstatedir='${prefix}/com' +sysconfdir='${prefix}/etc' +target_alias='' + +## ----------- ## +## confdefs.h. ## +## ----------- ## + +/* confdefs.h */ +#define PACKAGE_NAME "libenet" +#define PACKAGE_TARNAME "libenet" +#define PACKAGE_VERSION "1.3.18" +#define PACKAGE_STRING "libenet 1.3.18" +#define PACKAGE_BUGREPORT "" +#define PACKAGE_URL "" +#define PACKAGE "libenet" +#define VERSION "1.3.18" +#define HAVE_STDIO_H 1 +#define HAVE_STDLIB_H 1 +#define HAVE_STRING_H 1 +#define HAVE_INTTYPES_H 1 +#define HAVE_STDINT_H 1 +#define HAVE_STRINGS_H 1 +#define HAVE_SYS_STAT_H 1 +#define HAVE_SYS_TYPES_H 1 +#define HAVE_UNISTD_H 1 +#define STDC_HEADERS 1 +#define HAVE_DLFCN_H 1 +#define LT_OBJDIR ".libs/" +#define HAS_GETADDRINFO 1 +#define HAS_GETNAMEINFO 1 +#define HAS_POLL 1 +#define HAS_FCNTL 1 +#define HAS_INET_PTON 1 +#define HAS_INET_NTOP 1 +#define HAS_MSGHDR_FLAGS 1 +#define HAS_SOCKLEN_T 1 + +configure: exit 0 diff --git a/pkg/enet/enet/config.status b/pkg/enet/enet/config.status new file mode 100755 index 000000000..eaa62ec62 --- /dev/null +++ b/pkg/enet/enet/config.status @@ -0,0 +1,1833 @@ +#! /bin/sh +# Generated by configure. +# Run this file to recreate the current configuration. +# Compiler output produced by configure, useful for debugging +# configure, is in config.log if it exists. + +debug=false +ac_cs_recheck=false +ac_cs_silent=false + +SHELL=${CONFIG_SHELL-/bin/sh} +export SHELL +## -------------------- ## +## M4sh Initialization. ## +## -------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +as_nop=: +if test ${ZSH_VERSION+y} && (emulate sh) >/dev/null 2>&1 +then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else $as_nop + case `(set -o) 2>/dev/null` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi + + + +# Reset variables that may have inherited troublesome values from +# the environment. + +# IFS needs to be set, to space, tab, and newline, in precisely that order. +# (If _AS_PATH_WALK were called with IFS unset, it would have the +# side effect of setting IFS to empty, thus disabling word splitting.) +# Quoting is to prevent editors from complaining about space-tab. +as_nl=' +' +export as_nl +IFS=" "" $as_nl" + +PS1='$ ' +PS2='> ' +PS4='+ ' + +# Ensure predictable behavior from utilities with locale-dependent output. 
+LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# We cannot yet rely on "unset" to work, but we need these variables +# to be unset--not just set to an empty or harmless value--now, to +# avoid bugs in old shells (e.g. pre-3.0 UWIN ksh). This construct +# also avoids known problems related to "unset" and subshell syntax +# in other old shells (e.g. bash 2.01 and pdksh 5.2.14). +for as_var in BASH_ENV ENV MAIL MAILPATH CDPATH +do eval test \${$as_var+y} \ + && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +done + +# Ensure that fds 0, 1, and 2 are open. +if (exec 3>&0) 2>/dev/null; then :; else exec 0&1) 2>/dev/null; then :; else exec 1>/dev/null; fi +if (exec 3>&2) ; then :; else exec 2>/dev/null; fi + +# The user is always right. +if ${PATH_SEPARATOR+false} :; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + +# Find who we are. Look in the path if we contain no directory separator. +as_myself= +case $0 in #(( + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + test -r "$as_dir$0" && as_myself=$as_dir$0 && break + done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + printf "%s\n" "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + exit 1 +fi + + + +# as_fn_error STATUS ERROR [LINENO LOG_FD] +# ---------------------------------------- +# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are +# provided, also output the error to LOG_FD, referencing LINENO. Then exit the +# script with STATUS, using 1 if that was 0. +as_fn_error () +{ + as_status=$1; test $as_status -eq 0 && as_status=1 + if test "$4"; then + as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + fi + printf "%s\n" "$as_me: error: $2" >&2 + as_fn_exit $as_status +} # as_fn_error + + + +# as_fn_set_status STATUS +# ----------------------- +# Set $? to STATUS, without forking. +as_fn_set_status () +{ + return $1 +} # as_fn_set_status + +# as_fn_exit STATUS +# ----------------- +# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. +as_fn_exit () +{ + set +e + as_fn_set_status $1 + exit $1 +} # as_fn_exit + +# as_fn_unset VAR +# --------------- +# Portably unset VAR. +as_fn_unset () +{ + { eval $1=; unset $1;} +} +as_unset=as_fn_unset + +# as_fn_append VAR VALUE +# ---------------------- +# Append the text in VALUE to the end of the definition contained in VAR. Take +# advantage of any shell optimizations that allow amortized linear growth over +# repeated appends, instead of the typical quadratic growth present in naive +# implementations. +if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null +then : + eval 'as_fn_append () + { + eval $1+=\$2 + }' +else $as_nop + as_fn_append () + { + eval $1=\$$1\$2 + } +fi # as_fn_append + +# as_fn_arith ARG... +# ------------------ +# Perform arithmetic evaluation on the ARGs, and store the result in the +# global $as_val. Take advantage of shells that can avoid forks. The arguments +# must be portable across $(()) and expr. 
+if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null +then : + eval 'as_fn_arith () + { + as_val=$(( $* )) + }' +else $as_nop + as_fn_arith () + { + as_val=`expr "$@" || test $? -eq 1` + } +fi # as_fn_arith + + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +printf "%s\n" X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + + +# Determine whether it's possible to make 'echo' print without a newline. +# These variables are no longer used directly by Autoconf, but are AC_SUBSTed +# for compatibility with existing Makefiles. +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in #((((( +-n*) + case `echo 'xy\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. + xy) ECHO_C='\c';; + *) echo `echo ksh88 bug on AIX 6.1` > /dev/null + ECHO_T=' ';; + esac;; +*) + ECHO_N='-n';; +esac + +# For backward compatibility with old third-party macros, we provide +# the shell variables $as_echo and $as_echo_n. New code should use +# AS_ECHO(["message"]) and AS_ECHO_N(["message"]), respectively. +as_echo='printf %s\n' +as_echo_n='printf %s' + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null +fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -pR'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -pR' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else + as_ln_s='cp -pR' + fi +else + as_ln_s='cp -pR' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + + +# as_fn_mkdir_p +# ------------- +# Create "$as_dir" as a directory, including parents if necessary. +as_fn_mkdir_p () +{ + + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || eval $as_mkdir_p || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`printf "%s\n" "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 
2>/dev/null || +printf "%s\n" X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" + + +} # as_fn_mkdir_p +if mkdir -p . 2>/dev/null; then + as_mkdir_p='mkdir -p "$as_dir"' +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + + +# as_fn_executable_p FILE +# ----------------------- +# Test if FILE is an executable regular file. +as_fn_executable_p () +{ + test -f "$1" && test -x "$1" +} # as_fn_executable_p +as_test_x='test -x' +as_executable_p=as_fn_executable_p + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + +exec 6>&1 +## ----------------------------------- ## +## Main body of $CONFIG_STATUS script. ## +## ----------------------------------- ## +# Save the log message, to keep $0 and so on meaningful, and to +# report actual input values of CONFIG_FILES etc. instead of their +# values after options handling. +ac_log=" +This file was extended by libenet $as_me 1.3.18, which was +generated by GNU Autoconf 2.71. Invocation command line was + + CONFIG_FILES = $CONFIG_FILES + CONFIG_HEADERS = $CONFIG_HEADERS + CONFIG_LINKS = $CONFIG_LINKS + CONFIG_COMMANDS = $CONFIG_COMMANDS + $ $0 $@ + +on `(hostname || uname -n) 2>/dev/null | sed 1q` +" + +# Files that config.status was made for. +config_files=" Makefile libenet.pc" +config_commands=" depfiles libtool" + +ac_cs_usage="\ +\`$as_me' instantiates files and other configuration actions +from templates according to the current configuration. Unless the files +and actions are specified as TAGs, all are instantiated by default. + +Usage: $0 [OPTION]... [TAG]... + + -h, --help print this help, then exit + -V, --version print version number and configuration settings, then exit + --config print configuration, then exit + -q, --quiet, --silent + do not print progress messages + -d, --debug don't remove temporary files + --recheck update $as_me by reconfiguring in the same conditions + --file=FILE[:TEMPLATE] + instantiate the configuration file FILE + +Configuration files: +$config_files + +Configuration commands: +$config_commands + +Report bugs to the package provider." + +ac_cs_config='' +ac_cs_version="\ +libenet config.status 1.3.18 +configured by ./configure, generated by GNU Autoconf 2.71, + with options \"$ac_cs_config\" + +Copyright (C) 2021 Free Software Foundation, Inc. +This config.status script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it." + +ac_pwd='/Users/cfoust/Developer/cfoust/sour/pkg/enet/enet' +srcdir='.' +INSTALL='/usr/bin/install -c' +MKDIR_P='./install-sh -c -d' +AWK='awk' +test -n "$AWK" || AWK=awk +# The default lists apply if the user does not specify any file. +ac_need_defaults=: +while test $# != 0 +do + case $1 in + --*=?*) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` + ac_shift=: + ;; + --*=) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg= + ac_shift=: + ;; + *) + ac_option=$1 + ac_optarg=$2 + ac_shift=shift + ;; + esac + + case $ac_option in + # Handling of the options. 
+ -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) + ac_cs_recheck=: ;; + --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) + printf "%s\n" "$ac_cs_version"; exit ;; + --config | --confi | --conf | --con | --co | --c ) + printf "%s\n" "$ac_cs_config"; exit ;; + --debug | --debu | --deb | --de | --d | -d ) + debug=: ;; + --file | --fil | --fi | --f ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`printf "%s\n" "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + '') as_fn_error $? "missing file argument" ;; + esac + as_fn_append CONFIG_FILES " '$ac_optarg'" + ac_need_defaults=false;; + --he | --h | --help | --hel | -h ) + printf "%s\n" "$ac_cs_usage"; exit ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil | --si | --s) + ac_cs_silent=: ;; + + # This is an error. + -*) as_fn_error $? "unrecognized option: \`$1' +Try \`$0 --help' for more information." ;; + + *) as_fn_append ac_config_targets " $1" + ac_need_defaults=false ;; + + esac + shift +done + +ac_configure_extra_args= + +if $ac_cs_silent; then + exec 6>/dev/null + ac_configure_extra_args="$ac_configure_extra_args --silent" +fi + +if $ac_cs_recheck; then + set X /bin/sh './configure' $ac_configure_extra_args --no-create --no-recursion + shift + \printf "%s\n" "running CONFIG_SHELL=/bin/sh $*" >&6 + CONFIG_SHELL='/bin/sh' + export CONFIG_SHELL + exec "$@" +fi + +exec 5>>config.log +{ + echo + sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX +## Running $as_me. ## +_ASBOX + printf "%s\n" "$ac_log" +} >&5 + +# +# INIT-COMMANDS +# +AMDEP_TRUE="" MAKE="make" + + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +sed_quote_subst='s/\(["`$\\]\)/\\\1/g' +double_quote_subst='s/\(["`\\]\)/\\\1/g' +delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' +macro_version='2.4.7' +macro_revision='2.4.7' +enable_shared='yes' +enable_static='yes' +pic_mode='default' +enable_fast_install='needless' +shared_archive_member_spec='' +SHELL='/bin/sh' +ECHO='printf %s\n' +PATH_SEPARATOR=':' +host_alias='' +host='aarch64-apple-darwin22.1.0' +host_os='darwin22.1.0' +build_alias='' +build='aarch64-apple-darwin22.1.0' +build_os='darwin22.1.0' +SED='/usr/bin/sed' +Xsed='/usr/bin/sed -e 1s/^X//' +GREP='/usr/bin/grep' +EGREP='/usr/bin/grep -E' +FGREP='/usr/bin/grep -F' +LD='/Library/Developer/CommandLineTools/usr/bin/ld' +NM='/usr/bin/nm -B' +LN_S='ln -s' +max_cmd_len='786432' +ac_objext='o' +exeext='' +lt_unset='unset' +lt_SP2NL='tr \040 \012' +lt_NL2SP='tr \015\012 \040\040' +lt_cv_to_host_file_cmd='func_convert_file_noop' +lt_cv_to_tool_file_cmd='func_convert_file_noop' +reload_flag=' -r' +reload_cmds='$LTCC $LTCFLAGS -nostdlib $wl-r -o $output$reload_objs' +FILECMD='file' +OBJDUMP='objdump' +deplibs_check_method='pass_all' +file_magic_cmd='$MAGIC_CMD' +file_magic_glob='' +want_nocaseglob='no' +DLLTOOL='false' +sharedlib_from_linklib_cmd='printf %s\n' +AR='ar' +lt_ar_flags='cr' +AR_FLAGS='cr' +archiver_list_spec='' +STRIP='strip' +RANLIB='ranlib' +old_postinstall_cmds='chmod 644 $oldlib~$RANLIB $tool_oldlib' +old_postuninstall_cmds='' +old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs~$RANLIB $tool_oldlib' +lock_old_archive_extraction='yes' +CC='gcc' +CFLAGS='-g -O2' +compiler='gcc' +GCC='yes' +lt_cv_sys_global_symbol_pipe='/usr/bin/sed -n -e '\''s/^.*[ ]\([ABCDGIRSTW][ABCDGIRSTW]*\)[ ][ ]*_\([_A-Za-z][_A-Za-z0-9]*\)$/\1 _\2 \2/p'\'' | /usr/bin/sed '\''/ __gnu_lto/d'\''' 
+lt_cv_sys_global_symbol_to_cdecl='/usr/bin/sed -n -e '\''s/^T .* \(.*\)$/extern int \1();/p'\'' -e '\''s/^[ABCDGIRSTW][ABCDGIRSTW]* .* \(.*\)$/extern char \1;/p'\''' +lt_cv_sys_global_symbol_to_import='' +lt_cv_sys_global_symbol_to_c_name_address='/usr/bin/sed -n -e '\''s/^: \(.*\) .*$/ {"\1", (void *) 0},/p'\'' -e '\''s/^[ABCDGIRSTW][ABCDGIRSTW]* .* \(.*\)$/ {"\1", (void *) \&\1},/p'\''' +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='/usr/bin/sed -n -e '\''s/^: \(.*\) .*$/ {"\1", (void *) 0},/p'\'' -e '\''s/^[ABCDGIRSTW][ABCDGIRSTW]* .* \(lib.*\)$/ {"\1", (void *) \&\1},/p'\'' -e '\''s/^[ABCDGIRSTW][ABCDGIRSTW]* .* \(.*\)$/ {"lib\1", (void *) \&\1},/p'\''' +lt_cv_nm_interface='BSD nm' +nm_file_list_spec='@' +lt_sysroot='' +lt_cv_truncate_bin='/bin/dd bs=4096 count=1' +objdir='.libs' +MAGIC_CMD='file' +lt_prog_compiler_no_builtin_flag=' -fno-builtin -fno-rtti -fno-exceptions' +lt_prog_compiler_pic=' -fno-common -DPIC' +lt_prog_compiler_wl='-Wl,' +lt_prog_compiler_static='' +lt_cv_prog_compiler_c_o='yes' +need_locks='no' +MANIFEST_TOOL=':' +DSYMUTIL='dsymutil' +NMEDIT='nmedit' +LIPO='lipo' +OTOOL='otool' +OTOOL64=':' +libext='a' +shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' +extract_expsyms_cmds='' +archive_cmds_need_lc='no' +enable_shared_with_static_runtimes='no' +export_dynamic_flag_spec='' +whole_archive_flag_spec='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' +compiler_needs_object='no' +old_archive_from_new_cmds='' +old_archive_from_expsyms_cmds='' +archive_cmds='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring $single_module' +archive_expsym_cmds='/usr/bin/sed '\''s|^|_|'\'' < $export_symbols > $output_objdir/$libname-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring $single_module $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym' +module_cmds='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs $compiler_flags' +module_expsym_cmds='/usr/bin/sed -e '\''s|^|_|'\'' < $export_symbols > $output_objdir/$libname-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs $compiler_flags $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym' +with_gnu_ld='no' +allow_undefined_flag='$wl-undefined ${wl}dynamic_lookup' +no_undefined_flag='' +hardcode_libdir_flag_spec='' +hardcode_libdir_separator='' +hardcode_direct='no' +hardcode_direct_absolute='no' +hardcode_minus_L='no' +hardcode_shlibpath_var='unsupported' +hardcode_automatic='yes' +inherit_rpath='no' +link_all_deplibs='yes' +always_export_symbols='no' +export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' +exclude_expsyms='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' +include_expsyms='' +prelink_cmds='' +postlink_cmds='' +file_list_spec='' +variables_saved_for_relink='PATH DYLD_LIBRARY_PATH GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH' +need_lib_prefix='no' +need_version='no' +version_type='darwin' +runpath_var='' +shlibpath_var='DYLD_LIBRARY_PATH' +shlibpath_overrides_runpath='yes' +libname_spec='lib$name' +library_names_spec='$libname$release$major$shared_ext $libname$shared_ext' +soname_spec='$libname$release$major$shared_ext' +install_override_mode='' +postinstall_cmds='' +postuninstall_cmds='' +finish_cmds='' +finish_eval='' 
+hardcode_into_libs='no' +sys_lib_search_path_spec='/Library/Developer/CommandLineTools/usr/lib/clang/14.0.3 /usr/local/lib' +configure_time_dlsearch_path='/usr/local/lib /lib /usr/lib' +configure_time_lt_sys_library_path='' +hardcode_action='immediate' +enable_dlopen='unknown' +enable_dlopen_self='unknown' +enable_dlopen_self_static='unknown' +old_striplib='strip -S' +striplib='strip -x' + +LTCC='gcc' +LTCFLAGS='-g -O2' +compiler='gcc' + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +$1 +_LTECHO_EOF' +} + +# Quote evaled strings. +for var in SHELL ECHO PATH_SEPARATOR SED GREP EGREP FGREP LD NM LN_S lt_SP2NL lt_NL2SP reload_flag FILECMD OBJDUMP deplibs_check_method file_magic_cmd file_magic_glob want_nocaseglob DLLTOOL sharedlib_from_linklib_cmd AR archiver_list_spec STRIP RANLIB CC CFLAGS compiler lt_cv_sys_global_symbol_pipe lt_cv_sys_global_symbol_to_cdecl lt_cv_sys_global_symbol_to_import lt_cv_sys_global_symbol_to_c_name_address lt_cv_sys_global_symbol_to_c_name_address_lib_prefix lt_cv_nm_interface nm_file_list_spec lt_cv_truncate_bin lt_prog_compiler_no_builtin_flag lt_prog_compiler_pic lt_prog_compiler_wl lt_prog_compiler_static lt_cv_prog_compiler_c_o need_locks MANIFEST_TOOL DSYMUTIL NMEDIT LIPO OTOOL OTOOL64 shrext_cmds export_dynamic_flag_spec whole_archive_flag_spec compiler_needs_object with_gnu_ld allow_undefined_flag no_undefined_flag hardcode_libdir_flag_spec hardcode_libdir_separator exclude_expsyms include_expsyms file_list_spec variables_saved_for_relink libname_spec library_names_spec soname_spec install_override_mode finish_eval old_striplib striplib; do + case `eval \\$ECHO \\""\\$$var"\\"` in + *[\\\`\"\$]*) + eval "lt_$var=\\\"\`\$ECHO \"\$$var\" | \$SED \"\$sed_quote_subst\"\`\\\"" ## exclude from sc_prohibit_nested_quotes + ;; + *) + eval "lt_$var=\\\"\$$var\\\"" + ;; + esac +done + +# Double-quote double-evaled strings. +for var in reload_cmds old_postinstall_cmds old_postuninstall_cmds old_archive_cmds extract_expsyms_cmds old_archive_from_new_cmds old_archive_from_expsyms_cmds archive_cmds archive_expsym_cmds module_cmds module_expsym_cmds export_symbols_cmds prelink_cmds postlink_cmds postinstall_cmds postuninstall_cmds finish_cmds sys_lib_search_path_spec configure_time_dlsearch_path configure_time_lt_sys_library_path; do + case `eval \\$ECHO \\""\\$$var"\\"` in + *[\\\`\"\$]*) + eval "lt_$var=\\\"\`\$ECHO \"\$$var\" | \$SED -e \"\$double_quote_subst\" -e \"\$sed_quote_subst\" -e \"\$delay_variable_subst\"\`\\\"" ## exclude from sc_prohibit_nested_quotes + ;; + *) + eval "lt_$var=\\\"\$$var\\\"" + ;; + esac +done + +ac_aux_dir='./' + +# See if we are running on zsh, and set the options that allow our +# commands through without removal of \ escapes INIT. +if test -n "${ZSH_VERSION+set}"; then + setopt NO_GLOB_SUBST +fi + + + PACKAGE='libenet' + VERSION='1.3.18' + RM='rm -f' + ofile='libtool' + + + + + +# Handling of arguments. +for ac_config_target in $ac_config_targets +do + case $ac_config_target in + "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;; + "libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;; + "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; + "libenet.pc") CONFIG_FILES="$CONFIG_FILES libenet.pc" ;; + + *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; + esac +done + + +# If the user did not use the arguments to specify the items to instantiate, +# then the envvar interface is used. Set only those that are not. 
+# We use the long form for the default assignment because of an extremely +# bizarre bug on SunOS 4.1.3. +if $ac_need_defaults; then + test ${CONFIG_FILES+y} || CONFIG_FILES=$config_files + test ${CONFIG_COMMANDS+y} || CONFIG_COMMANDS=$config_commands +fi + +# Have a temporary directory for convenience. Make it in the build tree +# simply because there is no reason against having it here, and in addition, +# creating and moving files from /tmp can sometimes cause problems. +# Hook for its removal unless debugging. +# Note that there is a small window in which the directory will not be cleaned: +# after its creation but before its name has been assigned to `$tmp'. +$debug || +{ + tmp= ac_tmp= + trap 'exit_status=$? + : "${ac_tmp:=$tmp}" + { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status +' 0 + trap 'as_fn_exit 1' 1 2 13 15 +} +# Create a (secure) tmp directory for tmp files. + +{ + tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && + test -d "$tmp" +} || +{ + tmp=./conf$$-$RANDOM + (umask 077 && mkdir "$tmp") +} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 +ac_tmp=$tmp + +# Set up the scripts for CONFIG_FILES section. +# No need to generate them if there are no CONFIG_FILES. +# This happens for instance with `./config.status config.h'. +if test -n "$CONFIG_FILES"; then + + +ac_cr=`echo X | tr X '\015'` +# On cygwin, bash can eat \r inside `` if the user requested igncr. +# But we know of no other shell where ac_cr would be empty at this +# point, so we can use a bashism as a fallback. +if test "x$ac_cr" = x; then + eval ac_cr=\$\'\\r\' +fi +ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` +if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then + ac_cs_awk_cr='\\r' +else + ac_cs_awk_cr=$ac_cr +fi + +echo 'BEGIN {' >"$ac_tmp/subs1.awk" && +cat >>"$ac_tmp/subs1.awk" <<\_ACAWK && +S["am__EXEEXT_FALSE"]="" +S["am__EXEEXT_TRUE"]="#" +S["LTLIBOBJS"]="" +S["LIBOBJS"]="" +S["LT_SYS_LIBRARY_PATH"]="" +S["OTOOL64"]=":" +S["OTOOL"]="otool" +S["LIPO"]="lipo" +S["NMEDIT"]="nmedit" +S["DSYMUTIL"]="dsymutil" +S["MANIFEST_TOOL"]=":" +S["RANLIB"]="ranlib" +S["ac_ct_AR"]="ar" +S["AR"]="ar" +S["DLLTOOL"]="false" +S["OBJDUMP"]="objdump" +S["FILECMD"]="file" +S["LN_S"]="ln -s" +S["NM"]="/usr/bin/nm -B" +S["ac_ct_DUMPBIN"]="" +S["DUMPBIN"]="" +S["LD"]="/Library/Developer/CommandLineTools/usr/bin/ld" +S["FGREP"]="/usr/bin/grep -F" +S["EGREP"]="/usr/bin/grep -E" +S["GREP"]="/usr/bin/grep" +S["SED"]="/usr/bin/sed" +S["host_os"]="darwin22.1.0" +S["host_vendor"]="apple" +S["host_cpu"]="aarch64" +S["host"]="aarch64-apple-darwin22.1.0" +S["build_os"]="darwin22.1.0" +S["build_vendor"]="apple" +S["build_cpu"]="aarch64" +S["build"]="aarch64-apple-darwin22.1.0" +S["LIBTOOL"]="$(SHELL) $(top_builddir)/libtool" +S["am__fastdepCC_FALSE"]="#" +S["am__fastdepCC_TRUE"]="" +S["CCDEPMODE"]="depmode=gcc3" +S["am__nodep"]="_no" +S["AMDEPBACKSLASH"]="\\" +S["AMDEP_FALSE"]="#" +S["AMDEP_TRUE"]="" +S["am__include"]="include" +S["DEPDIR"]=".deps" +S["OBJEXT"]="o" +S["EXEEXT"]="" +S["ac_ct_CC"]="gcc" +S["CPPFLAGS"]="" +S["LDFLAGS"]="" +S["CFLAGS"]="-g -O2" +S["CC"]="gcc" +S["AM_BACKSLASH"]="\\" +S["AM_DEFAULT_VERBOSITY"]="1" +S["AM_DEFAULT_V"]="$(AM_DEFAULT_VERBOSITY)" +S["AM_V"]="$(V)" +S["CSCOPE"]="cscope" +S["ETAGS"]="etags" +S["CTAGS"]="ctags" +S["am__untar"]="$${TAR-tar} xf -" +S["am__tar"]="$${TAR-tar} chof - \"$$tardir\"" +S["AMTAR"]="$${TAR-tar}" +S["am__leading_dot"]="." 
+S["SET_MAKE"]="" +S["AWK"]="awk" +S["mkdir_p"]="$(MKDIR_P)" +S["MKDIR_P"]="./install-sh -c -d" +S["INSTALL_STRIP_PROGRAM"]="$(install_sh) -c -s" +S["STRIP"]="strip" +S["install_sh"]="${SHELL} /Users/cfoust/Developer/cfoust/sour/pkg/enet/enet/install-sh" +S["MAKEINFO"]="${SHELL} '/Users/cfoust/Developer/cfoust/sour/pkg/enet/enet/missing' makeinfo" +S["AUTOHEADER"]="${SHELL} '/Users/cfoust/Developer/cfoust/sour/pkg/enet/enet/missing' autoheader" +S["AUTOMAKE"]="${SHELL} '/Users/cfoust/Developer/cfoust/sour/pkg/enet/enet/missing' automake-1.16" +S["AUTOCONF"]="${SHELL} '/Users/cfoust/Developer/cfoust/sour/pkg/enet/enet/missing' autoconf" +S["ACLOCAL"]="${SHELL} '/Users/cfoust/Developer/cfoust/sour/pkg/enet/enet/missing' aclocal-1.16" +S["VERSION"]="1.3.18" +S["PACKAGE"]="libenet" +S["CYGPATH_W"]="echo" +S["am__isrc"]="" +S["INSTALL_DATA"]="${INSTALL} -m 644" +S["INSTALL_SCRIPT"]="${INSTALL}" +S["INSTALL_PROGRAM"]="${INSTALL}" +S["target_alias"]="" +S["host_alias"]="" +S["build_alias"]="" +S["LIBS"]="" +S["ECHO_T"]="" +S["ECHO_N"]="" +S["ECHO_C"]="\\c" +S["DEFS"]="-DPACKAGE_NAME=\\\"libenet\\\" -DPACKAGE_TARNAME=\\\"libenet\\\" -DPACKAGE_VERSION=\\\"1.3.18\\\" -DPACKAGE_STRING=\\\"libenet\\ 1.3.18\\\" -DPACKAGE_BUGREPORT=\\\"\\\" "\ +"-DPACKAGE_URL=\\\"\\\" -DPACKAGE=\\\"libenet\\\" -DVERSION=\\\"1.3.18\\\" -DHAVE_STDIO_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT"\ +"_H=1 -DHAVE_STRINGS_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_UNISTD_H=1 -DSTDC_HEADERS=1 -DHAVE_DLFCN_H=1 -DLT_OBJDIR=\\\".libs/\\\" -DHAS_GE"\ +"TADDRINFO=1 -DHAS_GETNAMEINFO=1 -DHAS_POLL=1 -DHAS_FCNTL=1 -DHAS_INET_PTON=1 -DHAS_INET_NTOP=1 -DHAS_MSGHDR_FLAGS=1 -DHAS_SOCKLEN_T=1" +S["mandir"]="${datarootdir}/man" +S["localedir"]="${datarootdir}/locale" +S["libdir"]="${exec_prefix}/lib" +S["psdir"]="${docdir}" +S["pdfdir"]="${docdir}" +S["dvidir"]="${docdir}" +S["htmldir"]="${docdir}" +S["infodir"]="${datarootdir}/info" +S["docdir"]="${datarootdir}/doc/${PACKAGE_TARNAME}" +S["oldincludedir"]="/usr/include" +S["includedir"]="${prefix}/include" +S["runstatedir"]="${localstatedir}/run" +S["localstatedir"]="${prefix}/var" +S["sharedstatedir"]="${prefix}/com" +S["sysconfdir"]="${prefix}/etc" +S["datadir"]="${datarootdir}" +S["datarootdir"]="${prefix}/share" +S["libexecdir"]="${exec_prefix}/libexec" +S["sbindir"]="${exec_prefix}/sbin" +S["bindir"]="${exec_prefix}/bin" +S["program_transform_name"]="s,x,x," +S["prefix"]="/usr/local" +S["exec_prefix"]="${prefix}" +S["PACKAGE_URL"]="" +S["PACKAGE_BUGREPORT"]="" +S["PACKAGE_STRING"]="libenet 1.3.18" +S["PACKAGE_VERSION"]="1.3.18" +S["PACKAGE_TARNAME"]="libenet" +S["PACKAGE_NAME"]="libenet" +S["PATH_SEPARATOR"]=":" +S["SHELL"]="/bin/sh" +S["am__quote"]="" +_ACAWK +cat >>"$ac_tmp/subs1.awk" <<_ACAWK && + for (key in S) S_is_set[key] = 1 + FS = "" + +} +{ + line = $ 0 + nfields = split(line, field, "@") + substed = 0 + len = length(field[1]) + for (i = 2; i < nfields; i++) { + key = field[i] + keylen = length(key) + if (S_is_set[key]) { + value = S[key] + line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) + len += length(value) + length(field[++i]) + substed = 1 + } else + len += 1 + keylen + } + + print line +} + +_ACAWK +if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then + sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" +else + cat +fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ + || as_fn_error $? 
"could not setup config files machinery" "$LINENO" 5 +fi # test -n "$CONFIG_FILES" + + +eval set X " :F $CONFIG_FILES :C $CONFIG_COMMANDS" +shift +for ac_tag +do + case $ac_tag in + :[FHLC]) ac_mode=$ac_tag; continue;; + esac + case $ac_mode$ac_tag in + :[FHL]*:*);; + :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;; + :[FH]-) ac_tag=-:-;; + :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; + esac + ac_save_IFS=$IFS + IFS=: + set x $ac_tag + IFS=$ac_save_IFS + shift + ac_file=$1 + shift + + case $ac_mode in + :L) ac_source=$1;; + :[FH]) + ac_file_inputs= + for ac_f + do + case $ac_f in + -) ac_f="$ac_tmp/stdin";; + *) # Look for the file first in the build tree, then in the source tree + # (if the path is not absolute). The absolute path cannot be DOS-style, + # because $ac_f cannot contain `:'. + test -f "$ac_f" || + case $ac_f in + [\\/$]*) false;; + *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; + esac || + as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; + esac + case $ac_f in *\'*) ac_f=`printf "%s\n" "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac + as_fn_append ac_file_inputs " '$ac_f'" + done + + # Let's still pretend it is `configure' which instantiates (i.e., don't + # use $as_me), people would be surprised to read: + # /* config.h. Generated by config.status. */ + configure_input='Generated from '` + printf "%s\n" "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' + `' by configure.' + if test x"$ac_file" != x-; then + configure_input="$ac_file. $configure_input" + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 +printf "%s\n" "$as_me: creating $ac_file" >&6;} + fi + # Neutralize special characters interpreted by sed in replacement strings. + case $configure_input in #( + *\&* | *\|* | *\\* ) + ac_sed_conf_input=`printf "%s\n" "$configure_input" | + sed 's/[\\\\&|]/\\\\&/g'`;; #( + *) ac_sed_conf_input=$configure_input;; + esac + + case $ac_tag in + *:-:* | *:-) cat >"$ac_tmp/stdin" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; + esac + ;; + esac + + ac_dir=`$as_dirname -- "$ac_file" || +$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$ac_file" : 'X\(//\)[^/]' \| \ + X"$ac_file" : 'X\(//\)$' \| \ + X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || +printf "%s\n" X"$ac_file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + as_dir="$ac_dir"; as_fn_mkdir_p + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`printf "%s\n" "$ac_dir" | sed 's|^\.[\\/]||'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`printf "%s\n" "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. 
+ ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + + case $ac_mode in + :F) + # + # CONFIG_FILE + # + + case $INSTALL in + [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; + *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; + esac + ac_MKDIR_P=$MKDIR_P + case $MKDIR_P in + [\\/$]* | ?:[\\/]* ) ;; + */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;; + esac +# If the template does not know about datarootdir, expand it. +# FIXME: This hack should be removed a few years after 2.60. +ac_datarootdir_hack=; ac_datarootdir_seen= +ac_sed_dataroot=' +/datarootdir/ { + p + q +} +/@datadir@/p +/@docdir@/p +/@infodir@/p +/@localedir@/p +/@mandir@/p' +case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in +*datarootdir*) ac_datarootdir_seen=yes;; +*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 +printf "%s\n" "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} + ac_datarootdir_hack=' + s&@datadir@&${datarootdir}&g + s&@docdir@&${datarootdir}/doc/${PACKAGE_TARNAME}&g + s&@infodir@&${datarootdir}/info&g + s&@localedir@&${datarootdir}/locale&g + s&@mandir@&${datarootdir}/man&g + s&\${datarootdir}&${prefix}/share&g' ;; +esac +ac_sed_extra="/^[ ]*VPATH[ ]*=[ ]*/{ +h +s/// +s/^/:/ +s/[ ]*$/:/ +s/:\$(srcdir):/:/g +s/:\${srcdir}:/:/g +s/:@srcdir@:/:/g +s/^:*// +s/:*$// +x +s/\(=[ ]*\).*/\1/ +G +s/\n// +s/^[^=]*=[ ]*$// +} + +:t +/@[a-zA-Z_][a-zA-Z_0-9]*@/!b +s|@configure_input@|$ac_sed_conf_input|;t t +s&@top_builddir@&$ac_top_builddir_sub&;t t +s&@top_build_prefix@&$ac_top_build_prefix&;t t +s&@srcdir@&$ac_srcdir&;t t +s&@abs_srcdir@&$ac_abs_srcdir&;t t +s&@top_srcdir@&$ac_top_srcdir&;t t +s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t +s&@builddir@&$ac_builddir&;t t +s&@abs_builddir@&$ac_abs_builddir&;t t +s&@abs_top_builddir@&$ac_abs_top_builddir&;t t +s&@INSTALL@&$ac_INSTALL&;t t +s&@MKDIR_P@&$ac_MKDIR_P&;t t +$ac_datarootdir_hack +" +eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ + >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + +test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && + { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && + { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ + "$ac_tmp/out"`; test -z "$ac_out"; } && + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&5 +printf "%s\n" "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&2;} + + rm -f "$ac_tmp/stdin" + case $ac_file in + -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; + *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; + esac \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + ;; + + + :C) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 +printf "%s\n" "$as_me: executing $ac_file commands" >&6;} + ;; + esac + + + case $ac_file$ac_mode in + "depfiles":C) test x"$AMDEP_TRUE" != x"" || { + # Older Autoconf quotes --file arguments for eval, but not when files + # are listed without --file. Let's play safe and only enable the eval + # if we detect the quoting. 
+ # TODO: see whether this extra hack can be removed once we start + # requiring Autoconf 2.70 or later. + case $CONFIG_FILES in #( + *\'*) : + eval set x "$CONFIG_FILES" ;; #( + *) : + set x $CONFIG_FILES ;; #( + *) : + ;; +esac + shift + # Used to flag and report bootstrapping failures. + am_rc=0 + for am_mf + do + # Strip MF so we end up with the name of the file. + am_mf=`printf "%s\n" "$am_mf" | sed -e 's/:.*$//'` + # Check whether this is an Automake generated Makefile which includes + # dependency-tracking related rules and includes. + # Grep'ing the whole file directly is not great: AIX grep has a line + # limit of 2048, but all sed's we know have understand at least 4000. + sed -n 's,^am--depfiles:.*,X,p' "$am_mf" | grep X >/dev/null 2>&1 \ + || continue + am_dirpart=`$as_dirname -- "$am_mf" || +$as_expr X"$am_mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$am_mf" : 'X\(//\)[^/]' \| \ + X"$am_mf" : 'X\(//\)$' \| \ + X"$am_mf" : 'X\(/\)' \| . 2>/dev/null || +printf "%s\n" X"$am_mf" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + am_filepart=`$as_basename -- "$am_mf" || +$as_expr X/"$am_mf" : '.*/\([^/][^/]*\)/*$' \| \ + X"$am_mf" : 'X\(//\)$' \| \ + X"$am_mf" : 'X\(/\)' \| . 2>/dev/null || +printf "%s\n" X/"$am_mf" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + { echo "$as_me:$LINENO: cd "$am_dirpart" \ + && sed -e '/# am--include-marker/d' "$am_filepart" \ + | $MAKE -f - am--depfiles" >&5 + (cd "$am_dirpart" \ + && sed -e '/# am--include-marker/d' "$am_filepart" \ + | $MAKE -f - am--depfiles) >&5 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } || am_rc=$? + done + if test $am_rc -ne 0; then + { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "Something went wrong bootstrapping makefile fragments + for automatic dependency tracking. If GNU make was not used, consider + re-running the configure script with MAKE=\"gmake\" (or whatever is + necessary). You can also try re-running configure with the + '--disable-dependency-tracking' option to at least be able to build + the package (albeit without support for automatic dependency tracking). +See \`config.log' for more details" "$LINENO" 5; } + fi + { am_dirpart=; unset am_dirpart;} + { am_filepart=; unset am_filepart;} + { am_mf=; unset am_mf;} + { am_rc=; unset am_rc;} + rm -f conftest-deps.mk +} + ;; + "libtool":C) + + # See if we are running on zsh, and set the options that allow our + # commands through without removal of \ escapes. + if test -n "${ZSH_VERSION+set}"; then + setopt NO_GLOB_SUBST + fi + + cfgfile=${ofile}T + trap "$RM \"$cfgfile\"; exit 1" 1 2 15 + $RM "$cfgfile" + + cat <<_LT_EOF >> "$cfgfile" +#! $SHELL +# Generated automatically by $as_me ($PACKAGE) $VERSION +# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: +# NOTE: Changes made to this file will be lost: look at ltmain.sh. + +# Provide generalized library-building support services. +# Written by Gordon Matzigkeit, 1996 + +# Copyright (C) 2014 Free Software Foundation, Inc. +# This is free software; see the source for copying conditions. There is NO +# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ +# GNU Libtool is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of of the License, or +# (at your option) any later version. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program or library that is built +# using GNU Libtool, you may include this file under the same +# distribution terms that you use for the rest of that program. +# +# GNU Libtool is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +# The names of the tagged configurations supported by this script. +available_tags='' + +# Configured defaults for sys_lib_dlsearch_path munging. +: \${LT_SYS_LIBRARY_PATH="$configure_time_lt_sys_library_path"} + +# ### BEGIN LIBTOOL CONFIG + +# Which release of libtool.m4 was used? +macro_version=$macro_version +macro_revision=$macro_revision + +# Whether or not to build shared libraries. +build_libtool_libs=$enable_shared + +# Whether or not to build static libraries. +build_old_libs=$enable_static + +# What type of objects to build. +pic_mode=$pic_mode + +# Whether or not to optimize for fast installation. +fast_install=$enable_fast_install + +# Shared archive member basename,for filename based shared library versioning on AIX. +shared_archive_member_spec=$shared_archive_member_spec + +# Shell to use when invoking shell scripts. +SHELL=$lt_SHELL + +# An echo program that protects backslashes. +ECHO=$lt_ECHO + +# The PATH separator for the build system. +PATH_SEPARATOR=$lt_PATH_SEPARATOR + +# The host system. +host_alias=$host_alias +host=$host +host_os=$host_os + +# The build system. +build_alias=$build_alias +build=$build +build_os=$build_os + +# A sed program that does not truncate output. +SED=$lt_SED + +# Sed that helps us avoid accidentally triggering echo(1) options like -n. +Xsed="\$SED -e 1s/^X//" + +# A grep program that handles long lines. +GREP=$lt_GREP + +# An ERE matcher. +EGREP=$lt_EGREP + +# A literal string matcher. +FGREP=$lt_FGREP + +# A BSD- or MS-compatible name lister. +NM=$lt_NM + +# Whether we need soft or hard links. +LN_S=$lt_LN_S + +# What is the maximum length of a command? +max_cmd_len=$max_cmd_len + +# Object file suffix (normally "o"). +objext=$ac_objext + +# Executable file suffix (normally ""). +exeext=$exeext + +# whether the shell understands "unset". +lt_unset=$lt_unset + +# turn spaces into newlines. +SP2NL=$lt_lt_SP2NL + +# turn newlines into spaces. +NL2SP=$lt_lt_NL2SP + +# convert \$build file names to \$host format. +to_host_file_cmd=$lt_cv_to_host_file_cmd + +# convert \$build files to toolchain format. +to_tool_file_cmd=$lt_cv_to_tool_file_cmd + +# A file(cmd) program that detects file types. +FILECMD=$lt_FILECMD + +# An object symbol dumper. +OBJDUMP=$lt_OBJDUMP + +# Method to check whether dependent libraries are shared objects. +deplibs_check_method=$lt_deplibs_check_method + +# Command to use when deplibs_check_method = "file_magic". +file_magic_cmd=$lt_file_magic_cmd + +# How to find potential files when deplibs_check_method = "file_magic". +file_magic_glob=$lt_file_magic_glob + +# Find potential files using nocaseglob when deplibs_check_method = "file_magic". 
+want_nocaseglob=$lt_want_nocaseglob + +# DLL creation program. +DLLTOOL=$lt_DLLTOOL + +# Command to associate shared and link libraries. +sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd + +# The archiver. +AR=$lt_AR + +# Flags to create an archive (by configure). +lt_ar_flags=$lt_ar_flags + +# Flags to create an archive. +AR_FLAGS=\${ARFLAGS-"\$lt_ar_flags"} + +# How to feed a file listing to the archiver. +archiver_list_spec=$lt_archiver_list_spec + +# A symbol stripping program. +STRIP=$lt_STRIP + +# Commands used to install an old-style archive. +RANLIB=$lt_RANLIB +old_postinstall_cmds=$lt_old_postinstall_cmds +old_postuninstall_cmds=$lt_old_postuninstall_cmds + +# Whether to use a lock for old archive extraction. +lock_old_archive_extraction=$lock_old_archive_extraction + +# A C compiler. +LTCC=$lt_CC + +# LTCC compiler flags. +LTCFLAGS=$lt_CFLAGS + +# Take the output of nm and produce a listing of raw symbols and C names. +global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe + +# Transform the output of nm in a proper C declaration. +global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl + +# Transform the output of nm into a list of symbols to manually relocate. +global_symbol_to_import=$lt_lt_cv_sys_global_symbol_to_import + +# Transform the output of nm in a C name address pair. +global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + +# Transform the output of nm in a C name address pair when lib prefix is needed. +global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix + +# The name lister interface. +nm_interface=$lt_lt_cv_nm_interface + +# Specify filename containing input files for \$NM. +nm_file_list_spec=$lt_nm_file_list_spec + +# The root where to search for dependent libraries,and where our libraries should be installed. +lt_sysroot=$lt_sysroot + +# Command to truncate a binary pipe. +lt_truncate_bin=$lt_lt_cv_truncate_bin + +# The name of the directory that contains temporary libtool files. +objdir=$objdir + +# Used to examine libraries when file_magic_cmd begins with "file". +MAGIC_CMD=$MAGIC_CMD + +# Must we lock files when doing compilation? +need_locks=$lt_need_locks + +# Manifest tool. +MANIFEST_TOOL=$lt_MANIFEST_TOOL + +# Tool to manipulate archived DWARF debug symbol files on Mac OS X. +DSYMUTIL=$lt_DSYMUTIL + +# Tool to change global to local symbols on Mac OS X. +NMEDIT=$lt_NMEDIT + +# Tool to manipulate fat objects and archives on Mac OS X. +LIPO=$lt_LIPO + +# ldd/readelf like tool for Mach-O binaries on Mac OS X. +OTOOL=$lt_OTOOL + +# ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4. +OTOOL64=$lt_OTOOL64 + +# Old archive suffix (normally "a"). +libext=$libext + +# Shared library suffix (normally ".so"). +shrext_cmds=$lt_shrext_cmds + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds=$lt_extract_expsyms_cmds + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at link time. +variables_saved_for_relink=$lt_variables_saved_for_relink + +# Do we need the "lib" prefix for modules? +need_lib_prefix=$need_lib_prefix + +# Do we need a version for libraries? +need_version=$need_version + +# Library versioning type. +version_type=$version_type + +# Shared library runtime path variable. +runpath_var=$runpath_var + +# Shared library path variable. +shlibpath_var=$shlibpath_var + +# Is shlibpath searched before the hard-coded library search path? 
+shlibpath_overrides_runpath=$shlibpath_overrides_runpath + +# Format of library name prefix. +libname_spec=$lt_libname_spec + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME +library_names_spec=$lt_library_names_spec + +# The coded name of the library, if different from the real name. +soname_spec=$lt_soname_spec + +# Permission mode override for installation of shared libraries. +install_override_mode=$lt_install_override_mode + +# Command to use after installation of a shared archive. +postinstall_cmds=$lt_postinstall_cmds + +# Command to use after uninstallation of a shared archive. +postuninstall_cmds=$lt_postuninstall_cmds + +# Commands used to finish a libtool library installation in a directory. +finish_cmds=$lt_finish_cmds + +# As "finish_cmds", except a single script fragment to be evaled but +# not shown. +finish_eval=$lt_finish_eval + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=$hardcode_into_libs + +# Compile-time system search path for libraries. +sys_lib_search_path_spec=$lt_sys_lib_search_path_spec + +# Detected run-time system search path for libraries. +sys_lib_dlsearch_path_spec=$lt_configure_time_dlsearch_path + +# Explicit LT_SYS_LIBRARY_PATH set during ./configure time. +configure_time_lt_sys_library_path=$lt_configure_time_lt_sys_library_path + +# Whether dlopen is supported. +dlopen_support=$enable_dlopen + +# Whether dlopen of programs is supported. +dlopen_self=$enable_dlopen_self + +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=$enable_dlopen_self_static + +# Commands to strip libraries. +old_striplib=$lt_old_striplib +striplib=$lt_striplib + + +# The linker used to build libraries. +LD=$lt_LD + +# How to create reloadable object files. +reload_flag=$lt_reload_flag +reload_cmds=$lt_reload_cmds + +# Commands used to build an old-style archive. +old_archive_cmds=$lt_old_archive_cmds + +# A language specific compiler. +CC=$lt_compiler + +# Is the compiler the GNU compiler? +with_gcc=$GCC + +# Compiler flag to turn off builtin functions. +no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_lt_prog_compiler_static + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_lt_cv_prog_compiler_c_o + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc + +# Whether or not to disallow shared libs when runtime libs are static. +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec + +# Whether the compiler copes with passing no objects directly. +compiler_needs_object=$lt_compiler_needs_object + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds + +# Commands used to build a shared archive. 
+archive_cmds=$lt_archive_cmds +archive_expsym_cmds=$lt_archive_expsym_cmds + +# Commands used to build a loadable module if different from building +# a shared archive. +module_cmds=$lt_module_cmds +module_expsym_cmds=$lt_module_expsym_cmds + +# Whether we are building with GNU ld or not. +with_gnu_ld=$lt_with_gnu_ld + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag + +# Flag that enforces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec + +# Whether we need a single "-rpath" flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator + +# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes +# DIR into the resulting binary. +hardcode_direct=$hardcode_direct + +# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes +# DIR into the resulting binary and the resulting library dependency is +# "absolute",i.e impossible to change by setting \$shlibpath_var if the +# library is relocated. +hardcode_direct_absolute=$hardcode_direct_absolute + +# Set to "yes" if using the -LDIR flag during linking hardcodes DIR +# into the resulting binary. +hardcode_minus_L=$hardcode_minus_L + +# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR +# into the resulting binary. +hardcode_shlibpath_var=$hardcode_shlibpath_var + +# Set to "yes" if building a shared library automatically hardcodes DIR +# into the library and all subsequent libraries and executables linked +# against it. +hardcode_automatic=$hardcode_automatic + +# Set to yes if linker adds runtime paths of dependent libraries +# to runtime path list. +inherit_rpath=$inherit_rpath + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs + +# Set to "yes" if exported symbols are required. +always_export_symbols=$always_export_symbols + +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_exclude_expsyms + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms + +# Commands necessary for linking programs (against libraries) with templates. +prelink_cmds=$lt_prelink_cmds + +# Commands necessary for finishing linking programs. +postlink_cmds=$lt_postlink_cmds + +# Specify filename containing input files. +file_list_spec=$lt_file_list_spec + +# How to hardcode a shared library path into an executable. 
+hardcode_action=$hardcode_action + +# ### END LIBTOOL CONFIG + +_LT_EOF + + cat <<'_LT_EOF' >> "$cfgfile" + +# ### BEGIN FUNCTIONS SHARED WITH CONFIGURE + +# func_munge_path_list VARIABLE PATH +# ----------------------------------- +# VARIABLE is name of variable containing _space_ separated list of +# directories to be munged by the contents of PATH, which is string +# having a format: +# "DIR[:DIR]:" +# string "DIR[ DIR]" will be prepended to VARIABLE +# ":DIR[:DIR]" +# string "DIR[ DIR]" will be appended to VARIABLE +# "DIRP[:DIRP]::[DIRA:]DIRA" +# string "DIRP[ DIRP]" will be prepended to VARIABLE and string +# "DIRA[ DIRA]" will be appended to VARIABLE +# "DIR[:DIR]" +# VARIABLE will be replaced by "DIR[ DIR]" +func_munge_path_list () +{ + case x$2 in + x) + ;; + *:) + eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\" + ;; + x:*) + eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\" + ;; + *::*) + eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\" + eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\" + ;; + *) + eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\" + ;; + esac +} + + +# Calculate cc_basename. Skip known compiler wrappers and cross-prefix. +func_cc_basename () +{ + for cc_temp in $*""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac + done + func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` +} + + +# ### END FUNCTIONS SHARED WITH CONFIGURE + +_LT_EOF + + case $host_os in + aix3*) + cat <<\_LT_EOF >> "$cfgfile" +# AIX sometimes has problems with the GCC collect2 program. For some +# reason, if we set the COLLECT_NAMES environment variable, the problems +# vanish in a puff of smoke. +if test set != "${COLLECT_NAMES+set}"; then + COLLECT_NAMES= + export COLLECT_NAMES +fi +_LT_EOF + ;; + esac + + + +ltmain=$ac_aux_dir/ltmain.sh + + + # We use sed instead of cat because bash on DJGPP gets confused if + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? + $SED '$q' "$ltmain" >> "$cfgfile" \ + || (rm -f "$cfgfile"; exit 1) + + mv -f "$cfgfile" "$ofile" || + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" + + ;; + + esac +done # for ac_tag + + +as_fn_exit 0 diff --git a/pkg/enet/enet/config.sub b/pkg/enet/enet/config.sub new file mode 100755 index 000000000..d74fb6dea --- /dev/null +++ b/pkg/enet/enet/config.sub @@ -0,0 +1,1884 @@ +#! /bin/sh +# Configuration validation subroutine script. +# Copyright 1992-2021 Free Software Foundation, Inc. + +# shellcheck disable=SC2006,SC2268 # see below for rationale + +timestamp='2021-08-14' + +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see . 
+# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that +# program. This Exception is an additional permission under section 7 +# of the GNU General Public License, version 3 ("GPLv3"). + + +# Please send patches to . +# +# Configuration subroutine to validate and canonicalize a configuration type. +# Supply the specified configuration type as an argument. +# If it is invalid, we print an error message on stderr and exit with code 1. +# Otherwise, we print the canonical config type on stdout and succeed. + +# You can get the latest version of this script from: +# https://git.savannah.gnu.org/cgit/config.git/plain/config.sub + +# This file is supposed to be the same for all GNU packages +# and recognize all the CPU types, system types and aliases +# that are meaningful with *any* GNU software. +# Each package is responsible for reporting which valid configurations +# it does not support. The user should be able to distinguish +# a failure to support a valid configuration from a meaningless +# configuration. + +# The goal of this file is to map all the various variations of a given +# machine specification into a single specification in the form: +# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM +# or in some cases, the newer four-part form: +# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM +# It is wrong to echo any other type of specification. + +# The "shellcheck disable" line above the timestamp inhibits complaints +# about features and limitations of the classic Bourne shell that were +# superseded or lifted in POSIX. However, this script identifies a wide +# variety of pre-POSIX systems that do not have POSIX shells at all, and +# even some reasonably current systems (Solaris 10 as case-in-point) still +# have a pre-POSIX /bin/sh. + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] CPU-MFR-OPSYS or ALIAS + +Canonicalize a configuration name. + +Options: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.sub ($timestamp) + +Copyright 1992-2021 Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" >&2 + exit 1 ;; + + *local*) + # First pass through any local machine types. 
+ echo "$1" + exit ;; + + * ) + break ;; + esac +done + +case $# in + 0) echo "$me: missing argument$help" >&2 + exit 1;; + 1) ;; + *) echo "$me: too many arguments$help" >&2 + exit 1;; +esac + +# Split fields of configuration type +# shellcheck disable=SC2162 +saved_IFS=$IFS +IFS="-" read field1 field2 field3 field4 <&2 + exit 1 + ;; + *-*-*-*) + basic_machine=$field1-$field2 + basic_os=$field3-$field4 + ;; + *-*-*) + # Ambiguous whether COMPANY is present, or skipped and KERNEL-OS is two + # parts + maybe_os=$field2-$field3 + case $maybe_os in + nto-qnx* | linux-* | uclinux-uclibc* \ + | uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* \ + | netbsd*-eabi* | kopensolaris*-gnu* | cloudabi*-eabi* \ + | storm-chaos* | os2-emx* | rtmk-nova*) + basic_machine=$field1 + basic_os=$maybe_os + ;; + android-linux) + basic_machine=$field1-unknown + basic_os=linux-android + ;; + *) + basic_machine=$field1-$field2 + basic_os=$field3 + ;; + esac + ;; + *-*) + # A lone config we happen to match not fitting any pattern + case $field1-$field2 in + decstation-3100) + basic_machine=mips-dec + basic_os= + ;; + *-*) + # Second component is usually, but not always the OS + case $field2 in + # Prevent following clause from handling this valid os + sun*os*) + basic_machine=$field1 + basic_os=$field2 + ;; + zephyr*) + basic_machine=$field1-unknown + basic_os=$field2 + ;; + # Manufacturers + dec* | mips* | sequent* | encore* | pc533* | sgi* | sony* \ + | att* | 7300* | 3300* | delta* | motorola* | sun[234]* \ + | unicom* | ibm* | next | hp | isi* | apollo | altos* \ + | convergent* | ncr* | news | 32* | 3600* | 3100* \ + | hitachi* | c[123]* | convex* | sun | crds | omron* | dg \ + | ultra | tti* | harris | dolphin | highlevel | gould \ + | cbm | ns | masscomp | apple | axis | knuth | cray \ + | microblaze* | sim | cisco \ + | oki | wec | wrs | winbond) + basic_machine=$field1-$field2 + basic_os= + ;; + *) + basic_machine=$field1 + basic_os=$field2 + ;; + esac + ;; + esac + ;; + *) + # Convert single-component short-hands not valid as part of + # multi-component configurations. 
+ case $field1 in + 386bsd) + basic_machine=i386-pc + basic_os=bsd + ;; + a29khif) + basic_machine=a29k-amd + basic_os=udi + ;; + adobe68k) + basic_machine=m68010-adobe + basic_os=scout + ;; + alliant) + basic_machine=fx80-alliant + basic_os= + ;; + altos | altos3068) + basic_machine=m68k-altos + basic_os= + ;; + am29k) + basic_machine=a29k-none + basic_os=bsd + ;; + amdahl) + basic_machine=580-amdahl + basic_os=sysv + ;; + amiga) + basic_machine=m68k-unknown + basic_os= + ;; + amigaos | amigados) + basic_machine=m68k-unknown + basic_os=amigaos + ;; + amigaunix | amix) + basic_machine=m68k-unknown + basic_os=sysv4 + ;; + apollo68) + basic_machine=m68k-apollo + basic_os=sysv + ;; + apollo68bsd) + basic_machine=m68k-apollo + basic_os=bsd + ;; + aros) + basic_machine=i386-pc + basic_os=aros + ;; + aux) + basic_machine=m68k-apple + basic_os=aux + ;; + balance) + basic_machine=ns32k-sequent + basic_os=dynix + ;; + blackfin) + basic_machine=bfin-unknown + basic_os=linux + ;; + cegcc) + basic_machine=arm-unknown + basic_os=cegcc + ;; + convex-c1) + basic_machine=c1-convex + basic_os=bsd + ;; + convex-c2) + basic_machine=c2-convex + basic_os=bsd + ;; + convex-c32) + basic_machine=c32-convex + basic_os=bsd + ;; + convex-c34) + basic_machine=c34-convex + basic_os=bsd + ;; + convex-c38) + basic_machine=c38-convex + basic_os=bsd + ;; + cray) + basic_machine=j90-cray + basic_os=unicos + ;; + crds | unos) + basic_machine=m68k-crds + basic_os= + ;; + da30) + basic_machine=m68k-da30 + basic_os= + ;; + decstation | pmax | pmin | dec3100 | decstatn) + basic_machine=mips-dec + basic_os= + ;; + delta88) + basic_machine=m88k-motorola + basic_os=sysv3 + ;; + dicos) + basic_machine=i686-pc + basic_os=dicos + ;; + djgpp) + basic_machine=i586-pc + basic_os=msdosdjgpp + ;; + ebmon29k) + basic_machine=a29k-amd + basic_os=ebmon + ;; + es1800 | OSE68k | ose68k | ose | OSE) + basic_machine=m68k-ericsson + basic_os=ose + ;; + gmicro) + basic_machine=tron-gmicro + basic_os=sysv + ;; + go32) + basic_machine=i386-pc + basic_os=go32 + ;; + h8300hms) + basic_machine=h8300-hitachi + basic_os=hms + ;; + h8300xray) + basic_machine=h8300-hitachi + basic_os=xray + ;; + h8500hms) + basic_machine=h8500-hitachi + basic_os=hms + ;; + harris) + basic_machine=m88k-harris + basic_os=sysv3 + ;; + hp300 | hp300hpux) + basic_machine=m68k-hp + basic_os=hpux + ;; + hp300bsd) + basic_machine=m68k-hp + basic_os=bsd + ;; + hppaosf) + basic_machine=hppa1.1-hp + basic_os=osf + ;; + hppro) + basic_machine=hppa1.1-hp + basic_os=proelf + ;; + i386mach) + basic_machine=i386-mach + basic_os=mach + ;; + isi68 | isi) + basic_machine=m68k-isi + basic_os=sysv + ;; + m68knommu) + basic_machine=m68k-unknown + basic_os=linux + ;; + magnum | m3230) + basic_machine=mips-mips + basic_os=sysv + ;; + merlin) + basic_machine=ns32k-utek + basic_os=sysv + ;; + mingw64) + basic_machine=x86_64-pc + basic_os=mingw64 + ;; + mingw32) + basic_machine=i686-pc + basic_os=mingw32 + ;; + mingw32ce) + basic_machine=arm-unknown + basic_os=mingw32ce + ;; + monitor) + basic_machine=m68k-rom68k + basic_os=coff + ;; + morphos) + basic_machine=powerpc-unknown + basic_os=morphos + ;; + moxiebox) + basic_machine=moxie-unknown + basic_os=moxiebox + ;; + msdos) + basic_machine=i386-pc + basic_os=msdos + ;; + msys) + basic_machine=i686-pc + basic_os=msys + ;; + mvs) + basic_machine=i370-ibm + basic_os=mvs + ;; + nacl) + basic_machine=le32-unknown + basic_os=nacl + ;; + ncr3000) + basic_machine=i486-ncr + basic_os=sysv4 + ;; + netbsd386) + basic_machine=i386-pc + basic_os=netbsd + ;; + 
netwinder) + basic_machine=armv4l-rebel + basic_os=linux + ;; + news | news700 | news800 | news900) + basic_machine=m68k-sony + basic_os=newsos + ;; + news1000) + basic_machine=m68030-sony + basic_os=newsos + ;; + necv70) + basic_machine=v70-nec + basic_os=sysv + ;; + nh3000) + basic_machine=m68k-harris + basic_os=cxux + ;; + nh[45]000) + basic_machine=m88k-harris + basic_os=cxux + ;; + nindy960) + basic_machine=i960-intel + basic_os=nindy + ;; + mon960) + basic_machine=i960-intel + basic_os=mon960 + ;; + nonstopux) + basic_machine=mips-compaq + basic_os=nonstopux + ;; + os400) + basic_machine=powerpc-ibm + basic_os=os400 + ;; + OSE68000 | ose68000) + basic_machine=m68000-ericsson + basic_os=ose + ;; + os68k) + basic_machine=m68k-none + basic_os=os68k + ;; + paragon) + basic_machine=i860-intel + basic_os=osf + ;; + parisc) + basic_machine=hppa-unknown + basic_os=linux + ;; + psp) + basic_machine=mipsallegrexel-sony + basic_os=psp + ;; + pw32) + basic_machine=i586-unknown + basic_os=pw32 + ;; + rdos | rdos64) + basic_machine=x86_64-pc + basic_os=rdos + ;; + rdos32) + basic_machine=i386-pc + basic_os=rdos + ;; + rom68k) + basic_machine=m68k-rom68k + basic_os=coff + ;; + sa29200) + basic_machine=a29k-amd + basic_os=udi + ;; + sei) + basic_machine=mips-sei + basic_os=seiux + ;; + sequent) + basic_machine=i386-sequent + basic_os= + ;; + sps7) + basic_machine=m68k-bull + basic_os=sysv2 + ;; + st2000) + basic_machine=m68k-tandem + basic_os= + ;; + stratus) + basic_machine=i860-stratus + basic_os=sysv4 + ;; + sun2) + basic_machine=m68000-sun + basic_os= + ;; + sun2os3) + basic_machine=m68000-sun + basic_os=sunos3 + ;; + sun2os4) + basic_machine=m68000-sun + basic_os=sunos4 + ;; + sun3) + basic_machine=m68k-sun + basic_os= + ;; + sun3os3) + basic_machine=m68k-sun + basic_os=sunos3 + ;; + sun3os4) + basic_machine=m68k-sun + basic_os=sunos4 + ;; + sun4) + basic_machine=sparc-sun + basic_os= + ;; + sun4os3) + basic_machine=sparc-sun + basic_os=sunos3 + ;; + sun4os4) + basic_machine=sparc-sun + basic_os=sunos4 + ;; + sun4sol2) + basic_machine=sparc-sun + basic_os=solaris2 + ;; + sun386 | sun386i | roadrunner) + basic_machine=i386-sun + basic_os= + ;; + sv1) + basic_machine=sv1-cray + basic_os=unicos + ;; + symmetry) + basic_machine=i386-sequent + basic_os=dynix + ;; + t3e) + basic_machine=alphaev5-cray + basic_os=unicos + ;; + t90) + basic_machine=t90-cray + basic_os=unicos + ;; + toad1) + basic_machine=pdp10-xkl + basic_os=tops20 + ;; + tpf) + basic_machine=s390x-ibm + basic_os=tpf + ;; + udi29k) + basic_machine=a29k-amd + basic_os=udi + ;; + ultra3) + basic_machine=a29k-nyu + basic_os=sym1 + ;; + v810 | necv810) + basic_machine=v810-nec + basic_os=none + ;; + vaxv) + basic_machine=vax-dec + basic_os=sysv + ;; + vms) + basic_machine=vax-dec + basic_os=vms + ;; + vsta) + basic_machine=i386-pc + basic_os=vsta + ;; + vxworks960) + basic_machine=i960-wrs + basic_os=vxworks + ;; + vxworks68) + basic_machine=m68k-wrs + basic_os=vxworks + ;; + vxworks29k) + basic_machine=a29k-wrs + basic_os=vxworks + ;; + xbox) + basic_machine=i686-pc + basic_os=mingw32 + ;; + ymp) + basic_machine=ymp-cray + basic_os=unicos + ;; + *) + basic_machine=$1 + basic_os= + ;; + esac + ;; +esac + +# Decode 1-component or ad-hoc basic machines +case $basic_machine in + # Here we handle the default manufacturer of certain CPU types. It is in + # some cases the only manufacturer, in others, it is the most popular. 
+ w89k) + cpu=hppa1.1 + vendor=winbond + ;; + op50n) + cpu=hppa1.1 + vendor=oki + ;; + op60c) + cpu=hppa1.1 + vendor=oki + ;; + ibm*) + cpu=i370 + vendor=ibm + ;; + orion105) + cpu=clipper + vendor=highlevel + ;; + mac | mpw | mac-mpw) + cpu=m68k + vendor=apple + ;; + pmac | pmac-mpw) + cpu=powerpc + vendor=apple + ;; + + # Recognize the various machine names and aliases which stand + # for a CPU type and a company and sometimes even an OS. + 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) + cpu=m68000 + vendor=att + ;; + 3b*) + cpu=we32k + vendor=att + ;; + bluegene*) + cpu=powerpc + vendor=ibm + basic_os=cnk + ;; + decsystem10* | dec10*) + cpu=pdp10 + vendor=dec + basic_os=tops10 + ;; + decsystem20* | dec20*) + cpu=pdp10 + vendor=dec + basic_os=tops20 + ;; + delta | 3300 | motorola-3300 | motorola-delta \ + | 3300-motorola | delta-motorola) + cpu=m68k + vendor=motorola + ;; + dpx2*) + cpu=m68k + vendor=bull + basic_os=sysv3 + ;; + encore | umax | mmax) + cpu=ns32k + vendor=encore + ;; + elxsi) + cpu=elxsi + vendor=elxsi + basic_os=${basic_os:-bsd} + ;; + fx2800) + cpu=i860 + vendor=alliant + ;; + genix) + cpu=ns32k + vendor=ns + ;; + h3050r* | hiux*) + cpu=hppa1.1 + vendor=hitachi + basic_os=hiuxwe2 + ;; + hp3k9[0-9][0-9] | hp9[0-9][0-9]) + cpu=hppa1.0 + vendor=hp + ;; + hp9k2[0-9][0-9] | hp9k31[0-9]) + cpu=m68000 + vendor=hp + ;; + hp9k3[2-9][0-9]) + cpu=m68k + vendor=hp + ;; + hp9k6[0-9][0-9] | hp6[0-9][0-9]) + cpu=hppa1.0 + vendor=hp + ;; + hp9k7[0-79][0-9] | hp7[0-79][0-9]) + cpu=hppa1.1 + vendor=hp + ;; + hp9k78[0-9] | hp78[0-9]) + # FIXME: really hppa2.0-hp + cpu=hppa1.1 + vendor=hp + ;; + hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) + # FIXME: really hppa2.0-hp + cpu=hppa1.1 + vendor=hp + ;; + hp9k8[0-9][13679] | hp8[0-9][13679]) + cpu=hppa1.1 + vendor=hp + ;; + hp9k8[0-9][0-9] | hp8[0-9][0-9]) + cpu=hppa1.0 + vendor=hp + ;; + i*86v32) + cpu=`echo "$1" | sed -e 's/86.*/86/'` + vendor=pc + basic_os=sysv32 + ;; + i*86v4*) + cpu=`echo "$1" | sed -e 's/86.*/86/'` + vendor=pc + basic_os=sysv4 + ;; + i*86v) + cpu=`echo "$1" | sed -e 's/86.*/86/'` + vendor=pc + basic_os=sysv + ;; + i*86sol2) + cpu=`echo "$1" | sed -e 's/86.*/86/'` + vendor=pc + basic_os=solaris2 + ;; + j90 | j90-cray) + cpu=j90 + vendor=cray + basic_os=${basic_os:-unicos} + ;; + iris | iris4d) + cpu=mips + vendor=sgi + case $basic_os in + irix*) + ;; + *) + basic_os=irix4 + ;; + esac + ;; + miniframe) + cpu=m68000 + vendor=convergent + ;; + *mint | mint[0-9]* | *MiNT | *MiNT[0-9]*) + cpu=m68k + vendor=atari + basic_os=mint + ;; + news-3600 | risc-news) + cpu=mips + vendor=sony + basic_os=newsos + ;; + next | m*-next) + cpu=m68k + vendor=next + case $basic_os in + openstep*) + ;; + nextstep*) + ;; + ns2*) + basic_os=nextstep2 + ;; + *) + basic_os=nextstep3 + ;; + esac + ;; + np1) + cpu=np1 + vendor=gould + ;; + op50n-* | op60c-*) + cpu=hppa1.1 + vendor=oki + basic_os=proelf + ;; + pa-hitachi) + cpu=hppa1.1 + vendor=hitachi + basic_os=hiuxwe2 + ;; + pbd) + cpu=sparc + vendor=tti + ;; + pbb) + cpu=m68k + vendor=tti + ;; + pc532) + cpu=ns32k + vendor=pc532 + ;; + pn) + cpu=pn + vendor=gould + ;; + power) + cpu=power + vendor=ibm + ;; + ps2) + cpu=i386 + vendor=ibm + ;; + rm[46]00) + cpu=mips + vendor=siemens + ;; + rtpc | rtpc-*) + cpu=romp + vendor=ibm + ;; + sde) + cpu=mipsisa32 + vendor=sde + basic_os=${basic_os:-elf} + ;; + simso-wrs) + cpu=sparclite + vendor=wrs + basic_os=vxworks + ;; + tower | tower-32) + cpu=m68k + vendor=ncr + ;; + vpp*|vx|vx-*) + cpu=f301 + 
vendor=fujitsu + ;; + w65) + cpu=w65 + vendor=wdc + ;; + w89k-*) + cpu=hppa1.1 + vendor=winbond + basic_os=proelf + ;; + none) + cpu=none + vendor=none + ;; + leon|leon[3-9]) + cpu=sparc + vendor=$basic_machine + ;; + leon-*|leon[3-9]-*) + cpu=sparc + vendor=`echo "$basic_machine" | sed 's/-.*//'` + ;; + + *-*) + # shellcheck disable=SC2162 + saved_IFS=$IFS + IFS="-" read cpu vendor <&2 + exit 1 + ;; + esac + ;; +esac + +# Here we canonicalize certain aliases for manufacturers. +case $vendor in + digital*) + vendor=dec + ;; + commodore*) + vendor=cbm + ;; + *) + ;; +esac + +# Decode manufacturer-specific aliases for certain operating systems. + +if test x$basic_os != x +then + +# First recognize some ad-hoc caes, or perhaps split kernel-os, or else just +# set os. +case $basic_os in + gnu/linux*) + kernel=linux + os=`echo "$basic_os" | sed -e 's|gnu/linux|gnu|'` + ;; + os2-emx) + kernel=os2 + os=`echo "$basic_os" | sed -e 's|os2-emx|emx|'` + ;; + nto-qnx*) + kernel=nto + os=`echo "$basic_os" | sed -e 's|nto-qnx|qnx|'` + ;; + *-*) + # shellcheck disable=SC2162 + saved_IFS=$IFS + IFS="-" read kernel os <&2 + exit 1 + ;; +esac + +# As a final step for OS-related things, validate the OS-kernel combination +# (given a valid OS), if there is a kernel. +case $kernel-$os in + linux-gnu* | linux-dietlibc* | linux-android* | linux-newlib* \ + | linux-musl* | linux-relibc* | linux-uclibc* ) + ;; + uclinux-uclibc* ) + ;; + -dietlibc* | -newlib* | -musl* | -relibc* | -uclibc* ) + # These are just libc implementations, not actual OSes, and thus + # require a kernel. + echo "Invalid configuration \`$1': libc \`$os' needs explicit kernel." 1>&2 + exit 1 + ;; + kfreebsd*-gnu* | kopensolaris*-gnu*) + ;; + vxworks-simlinux | vxworks-simwindows | vxworks-spe) + ;; + nto-qnx*) + ;; + os2-emx) + ;; + *-eabi* | *-gnueabi*) + ;; + -*) + # Blank kernel with real OS is always fine. + ;; + *-*) + echo "Invalid configuration \`$1': Kernel \`$kernel' not known to work with OS \`$os'." 1>&2 + exit 1 + ;; +esac + +# Here we handle the case where we know the os, and the CPU type, but not the +# manufacturer. We pick the logical manufacturer. +case $vendor in + unknown) + case $cpu-$os in + *-riscix*) + vendor=acorn + ;; + *-sunos*) + vendor=sun + ;; + *-cnk* | *-aix*) + vendor=ibm + ;; + *-beos*) + vendor=be + ;; + *-hpux*) + vendor=hp + ;; + *-mpeix*) + vendor=hp + ;; + *-hiux*) + vendor=hitachi + ;; + *-unos*) + vendor=crds + ;; + *-dgux*) + vendor=dg + ;; + *-luna*) + vendor=omron + ;; + *-genix*) + vendor=ns + ;; + *-clix*) + vendor=intergraph + ;; + *-mvs* | *-opened*) + vendor=ibm + ;; + *-os400*) + vendor=ibm + ;; + s390-* | s390x-*) + vendor=ibm + ;; + *-ptx*) + vendor=sequent + ;; + *-tpf*) + vendor=ibm + ;; + *-vxsim* | *-vxworks* | *-windiss*) + vendor=wrs + ;; + *-aux*) + vendor=apple + ;; + *-hms*) + vendor=hitachi + ;; + *-mpw* | *-macos*) + vendor=apple + ;; + *-*mint | *-mint[0-9]* | *-*MiNT | *-MiNT[0-9]*) + vendor=atari + ;; + *-vos*) + vendor=stratus + ;; + esac + ;; +esac + +echo "$cpu-$vendor-${kernel:+$kernel-}$os" +exit + +# Local variables: +# eval: (add-hook 'before-save-hook 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/pkg/enet/enet/configure b/pkg/enet/enet/configure new file mode 100755 index 000000000..740f83f68 --- /dev/null +++ b/pkg/enet/enet/configure @@ -0,0 +1,14875 @@ +#! /bin/sh +# Guess values for system-dependent variables and create Makefiles. +# Generated by GNU Autoconf 2.71 for libenet 1.3.18. 
+# +# +# Copyright (C) 1992-1996, 1998-2017, 2020-2021 Free Software Foundation, +# Inc. +# +# +# This configure script is free software; the Free Software Foundation +# gives unlimited permission to copy, distribute and modify it. +## -------------------- ## +## M4sh Initialization. ## +## -------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +as_nop=: +if test ${ZSH_VERSION+y} && (emulate sh) >/dev/null 2>&1 +then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else $as_nop + case `(set -o) 2>/dev/null` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi + + + +# Reset variables that may have inherited troublesome values from +# the environment. + +# IFS needs to be set, to space, tab, and newline, in precisely that order. +# (If _AS_PATH_WALK were called with IFS unset, it would have the +# side effect of setting IFS to empty, thus disabling word splitting.) +# Quoting is to prevent editors from complaining about space-tab. +as_nl=' +' +export as_nl +IFS=" "" $as_nl" + +PS1='$ ' +PS2='> ' +PS4='+ ' + +# Ensure predictable behavior from utilities with locale-dependent output. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# We cannot yet rely on "unset" to work, but we need these variables +# to be unset--not just set to an empty or harmless value--now, to +# avoid bugs in old shells (e.g. pre-3.0 UWIN ksh). This construct +# also avoids known problems related to "unset" and subshell syntax +# in other old shells (e.g. bash 2.01 and pdksh 5.2.14). +for as_var in BASH_ENV ENV MAIL MAILPATH CDPATH +do eval test \${$as_var+y} \ + && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +done + +# Ensure that fds 0, 1, and 2 are open. +if (exec 3>&0) 2>/dev/null; then :; else exec 0&1) 2>/dev/null; then :; else exec 1>/dev/null; fi +if (exec 3>&2) ; then :; else exec 2>/dev/null; fi + +# The user is always right. +if ${PATH_SEPARATOR+false} :; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + +# Find who we are. Look in the path if we contain no directory separator. +as_myself= +case $0 in #(( + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + test -r "$as_dir$0" && as_myself=$as_dir$0 && break + done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + printf "%s\n" "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + exit 1 +fi + + +# Use a proper internal environment variable to ensure we don't fall + # into an infinite loop, continuously re-executing ourselves. + if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then + _as_can_reexec=no; export _as_can_reexec; + # We cannot yet assume a decent shell, so we have to provide a +# neutralization value for shells without unset; and this also +# works around shells that cannot unset nonexistent variables. +# Preserve -v and -x to the replacement shell. 
+BASH_ENV=/dev/null +ENV=/dev/null +(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV +case $- in # (((( + *v*x* | *x*v* ) as_opts=-vx ;; + *v* ) as_opts=-v ;; + *x* ) as_opts=-x ;; + * ) as_opts= ;; +esac +exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} +# Admittedly, this is quite paranoid, since all the known shells bail +# out after a failed `exec'. +printf "%s\n" "$0: could not re-execute with $CONFIG_SHELL" >&2 +exit 255 + fi + # We don't want this to propagate to other subprocesses. + { _as_can_reexec=; unset _as_can_reexec;} +if test "x$CONFIG_SHELL" = x; then + as_bourne_compatible="as_nop=: +if test \${ZSH_VERSION+y} && (emulate sh) >/dev/null 2>&1 +then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which + # is contrary to our usage. Disable this feature. + alias -g '\${1+\"\$@\"}'='\"\$@\"' + setopt NO_GLOB_SUBST +else \$as_nop + case \`(set -o) 2>/dev/null\` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi +" + as_required="as_fn_return () { (exit \$1); } +as_fn_success () { as_fn_return 0; } +as_fn_failure () { as_fn_return 1; } +as_fn_ret_success () { return 0; } +as_fn_ret_failure () { return 1; } + +exitcode=0 +as_fn_success || { exitcode=1; echo as_fn_success failed.; } +as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } +as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } +as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } +if ( set x; as_fn_ret_success y && test x = \"\$1\" ) +then : + +else \$as_nop + exitcode=1; echo positional parameters were not saved. +fi +test x\$exitcode = x0 || exit 1 +blah=\$(echo \$(echo blah)) +test x\"\$blah\" = xblah || exit 1 +test -x / || exit 1" + as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO + as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO + eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && + test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 + + test -n \"\${ZSH_VERSION+set}\${BASH_VERSION+set}\" || ( + ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' + ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO + ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO + PATH=/empty FPATH=/empty; export PATH FPATH + test \"X\`printf %s \$ECHO\`\" = \"X\$ECHO\" \\ + || test \"X\`print -r -- \$ECHO\`\" = \"X\$ECHO\" ) || exit 1 +test \$(( 1 + 1 )) = 2 || exit 1" + if (eval "$as_required") 2>/dev/null +then : + as_have_required=yes +else $as_nop + as_have_required=no +fi + if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null +then : + +else $as_nop + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +as_found=false +for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + as_found=: + case $as_dir in #( + /*) + for as_base in sh bash ksh sh5; do + # Try only shells that exist, to save several forks. 
+ as_shell=$as_dir$as_base + if { test -f "$as_shell" || test -f "$as_shell.exe"; } && + as_run=a "$as_shell" -c "$as_bourne_compatible""$as_required" 2>/dev/null +then : + CONFIG_SHELL=$as_shell as_have_required=yes + if as_run=a "$as_shell" -c "$as_bourne_compatible""$as_suggested" 2>/dev/null +then : + break 2 +fi +fi + done;; + esac + as_found=false +done +IFS=$as_save_IFS +if $as_found +then : + +else $as_nop + if { test -f "$SHELL" || test -f "$SHELL.exe"; } && + as_run=a "$SHELL" -c "$as_bourne_compatible""$as_required" 2>/dev/null +then : + CONFIG_SHELL=$SHELL as_have_required=yes +fi +fi + + + if test "x$CONFIG_SHELL" != x +then : + export CONFIG_SHELL + # We cannot yet assume a decent shell, so we have to provide a +# neutralization value for shells without unset; and this also +# works around shells that cannot unset nonexistent variables. +# Preserve -v and -x to the replacement shell. +BASH_ENV=/dev/null +ENV=/dev/null +(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV +case $- in # (((( + *v*x* | *x*v* ) as_opts=-vx ;; + *v* ) as_opts=-v ;; + *x* ) as_opts=-x ;; + * ) as_opts= ;; +esac +exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} +# Admittedly, this is quite paranoid, since all the known shells bail +# out after a failed `exec'. +printf "%s\n" "$0: could not re-execute with $CONFIG_SHELL" >&2 +exit 255 +fi + + if test x$as_have_required = xno +then : + printf "%s\n" "$0: This script requires a shell more modern than all" + printf "%s\n" "$0: the shells that I found on your system." + if test ${ZSH_VERSION+y} ; then + printf "%s\n" "$0: In particular, zsh $ZSH_VERSION has bugs and should" + printf "%s\n" "$0: be upgraded to zsh 4.3.4 or later." + else + printf "%s\n" "$0: Please tell bug-autoconf@gnu.org about your system, +$0: including any error possibly output before this +$0: message. Then install a modern shell, or manually run +$0: the script under such a shell if you do have one." + fi + exit 1 +fi +fi +fi +SHELL=${CONFIG_SHELL-/bin/sh} +export SHELL +# Unset more variables known to interfere with behavior of common tools. +CLICOLOR_FORCE= GREP_OPTIONS= +unset CLICOLOR_FORCE GREP_OPTIONS + +## --------------------- ## +## M4sh Shell Functions. ## +## --------------------- ## +# as_fn_unset VAR +# --------------- +# Portably unset VAR. +as_fn_unset () +{ + { eval $1=; unset $1;} +} +as_unset=as_fn_unset + + +# as_fn_set_status STATUS +# ----------------------- +# Set $? to STATUS, without forking. +as_fn_set_status () +{ + return $1 +} # as_fn_set_status + +# as_fn_exit STATUS +# ----------------- +# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. +as_fn_exit () +{ + set +e + as_fn_set_status $1 + exit $1 +} # as_fn_exit +# as_fn_nop +# --------- +# Do nothing but, unlike ":", preserve the value of $?. +as_fn_nop () +{ + return $? +} +as_nop=as_fn_nop + +# as_fn_mkdir_p +# ------------- +# Create "$as_dir" as a directory, including parents if necessary. +as_fn_mkdir_p () +{ + + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || eval $as_mkdir_p || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`printf "%s\n" "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 
2>/dev/null || +printf "%s\n" X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" + + +} # as_fn_mkdir_p + +# as_fn_executable_p FILE +# ----------------------- +# Test if FILE is an executable regular file. +as_fn_executable_p () +{ + test -f "$1" && test -x "$1" +} # as_fn_executable_p +# as_fn_append VAR VALUE +# ---------------------- +# Append the text in VALUE to the end of the definition contained in VAR. Take +# advantage of any shell optimizations that allow amortized linear growth over +# repeated appends, instead of the typical quadratic growth present in naive +# implementations. +if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null +then : + eval 'as_fn_append () + { + eval $1+=\$2 + }' +else $as_nop + as_fn_append () + { + eval $1=\$$1\$2 + } +fi # as_fn_append + +# as_fn_arith ARG... +# ------------------ +# Perform arithmetic evaluation on the ARGs, and store the result in the +# global $as_val. Take advantage of shells that can avoid forks. The arguments +# must be portable across $(()) and expr. +if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null +then : + eval 'as_fn_arith () + { + as_val=$(( $* )) + }' +else $as_nop + as_fn_arith () + { + as_val=`expr "$@" || test $? -eq 1` + } +fi # as_fn_arith + +# as_fn_nop +# --------- +# Do nothing but, unlike ":", preserve the value of $?. +as_fn_nop () +{ + return $? +} +as_nop=as_fn_nop + +# as_fn_error STATUS ERROR [LINENO LOG_FD] +# ---------------------------------------- +# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are +# provided, also output the error to LOG_FD, referencing LINENO. Then exit the +# script with STATUS, using 1 if that was 0. +as_fn_error () +{ + as_status=$1; test $as_status -eq 0 && as_status=1 + if test "$4"; then + as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + fi + printf "%s\n" "$as_me: error: $2" >&2 + as_fn_exit $as_status +} # as_fn_error + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +printf "%s\n" X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + + + as_lineno_1=$LINENO as_lineno_1a=$LINENO + as_lineno_2=$LINENO as_lineno_2a=$LINENO + eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && + test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { + # Blame Lee E. McMahon (1931-1989) for sed's syntax. 
:-) + sed -n ' + p + /[$]LINENO/= + ' <$as_myself | + sed ' + s/[$]LINENO.*/&-/ + t lineno + b + :lineno + N + :loop + s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ + t loop + s/-\n.*// + ' >$as_me.lineno && + chmod +x "$as_me.lineno" || + { printf "%s\n" "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } + + # If we had to re-execute with $CONFIG_SHELL, we're ensured to have + # already done that, so ensure we don't try to do so again and fall + # in an infinite loop. This has already happened in practice. + _as_can_reexec=no; export _as_can_reexec + # Don't try to exec as it changes $[0], causing all sort of problems + # (the dirname of $[0] is not the place where we might find the + # original and so on. Autoconf is especially sensitive to this). + . "./$as_me.lineno" + # Exit status is that of the last command. + exit +} + + +# Determine whether it's possible to make 'echo' print without a newline. +# These variables are no longer used directly by Autoconf, but are AC_SUBSTed +# for compatibility with existing Makefiles. +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in #((((( +-n*) + case `echo 'xy\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. + xy) ECHO_C='\c';; + *) echo `echo ksh88 bug on AIX 6.1` > /dev/null + ECHO_T=' ';; + esac;; +*) + ECHO_N='-n';; +esac + +# For backward compatibility with old third-party macros, we provide +# the shell variables $as_echo and $as_echo_n. New code should use +# AS_ECHO(["message"]) and AS_ECHO_N(["message"]), respectively. +as_echo='printf %s\n' +as_echo_n='printf %s' + + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null +fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -pR'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -pR' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else + as_ln_s='cp -pR' + fi +else + as_ln_s='cp -pR' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + +if mkdir -p . 2>/dev/null; then + as_mkdir_p='mkdir -p "$as_dir"' +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + +as_test_x='test -x' +as_executable_p=as_fn_executable_p + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + +SHELL=${CONFIG_SHELL-/bin/sh} + + +test -n "$DJDIR" || exec 7<&0 &1 + +# Name of the host. +# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, +# so uname gets run too. +ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` + +# +# Initializations. +# +ac_default_prefix=/usr/local +ac_clean_files= +ac_config_libobj_dir=. +LIBOBJS= +cross_compiling=no +subdirs= +MFLAGS= +MAKEFLAGS= + +# Identity of this package. +PACKAGE_NAME='libenet' +PACKAGE_TARNAME='libenet' +PACKAGE_VERSION='1.3.18' +PACKAGE_STRING='libenet 1.3.18' +PACKAGE_BUGREPORT='' +PACKAGE_URL='' + +ac_unique_file="include/enet/enet.h" +# Factoring default headers for most tests. 
+ac_includes_default="\
+#include <stddef.h>
+#ifdef HAVE_STDIO_H
+# include <stdio.h>
+#endif
+#ifdef HAVE_STDLIB_H
+# include <stdlib.h>
+#endif
+#ifdef HAVE_STRING_H
+# include <string.h>
+#endif
+#ifdef HAVE_INTTYPES_H
+# include <inttypes.h>
+#endif
+#ifdef HAVE_STDINT_H
+# include <stdint.h>
+#endif
+#ifdef HAVE_STRINGS_H
+# include <strings.h>
+#endif
+#ifdef HAVE_SYS_TYPES_H
+# include <sys/types.h>
+#endif
+#ifdef HAVE_SYS_STAT_H
+# include <sys/stat.h>
+#endif
+#ifdef HAVE_UNISTD_H
+# include <unistd.h>
+#endif"
+
+ac_header_c_list=
+ac_subst_vars='am__EXEEXT_FALSE
+am__EXEEXT_TRUE
+LTLIBOBJS
+LIBOBJS
+LT_SYS_LIBRARY_PATH
+OTOOL64
+OTOOL
+LIPO
+NMEDIT
+DSYMUTIL
+MANIFEST_TOOL
+RANLIB
+ac_ct_AR
+AR
+DLLTOOL
+OBJDUMP
+FILECMD
+LN_S
+NM
+ac_ct_DUMPBIN
+DUMPBIN
+LD
+FGREP
+EGREP
+GREP
+SED
+host_os
+host_vendor
+host_cpu
+host
+build_os
+build_vendor
+build_cpu
+build
+LIBTOOL
+am__fastdepCC_FALSE
+am__fastdepCC_TRUE
+CCDEPMODE
+am__nodep
+AMDEPBACKSLASH
+AMDEP_FALSE
+AMDEP_TRUE
+am__include
+DEPDIR
+OBJEXT
+EXEEXT
+ac_ct_CC
+CPPFLAGS
+LDFLAGS
+CFLAGS
+CC
+AM_BACKSLASH
+AM_DEFAULT_VERBOSITY
+AM_DEFAULT_V
+AM_V
+CSCOPE
+ETAGS
+CTAGS
+am__untar
+am__tar
+AMTAR
+am__leading_dot
+SET_MAKE
+AWK
+mkdir_p
+MKDIR_P
+INSTALL_STRIP_PROGRAM
+STRIP
+install_sh
+MAKEINFO
+AUTOHEADER
+AUTOMAKE
+AUTOCONF
+ACLOCAL
+VERSION
+PACKAGE
+CYGPATH_W
+am__isrc
+INSTALL_DATA
+INSTALL_SCRIPT
+INSTALL_PROGRAM
+target_alias
+host_alias
+build_alias
+LIBS
+ECHO_T
+ECHO_N
+ECHO_C
+DEFS
+mandir
+localedir
+libdir
+psdir
+pdfdir
+dvidir
+htmldir
+infodir
+docdir
+oldincludedir
+includedir
+runstatedir
+localstatedir
+sharedstatedir
+sysconfdir
+datadir
+datarootdir
+libexecdir
+sbindir
+bindir
+program_transform_name
+prefix
+exec_prefix
+PACKAGE_URL
+PACKAGE_BUGREPORT
+PACKAGE_STRING
+PACKAGE_VERSION
+PACKAGE_TARNAME
+PACKAGE_NAME
+PATH_SEPARATOR
+SHELL
+am__quote'
+ac_subst_files=''
+ac_user_opts='
+enable_option_checking
+enable_silent_rules
+enable_dependency_tracking
+enable_shared
+enable_static
+with_pic
+enable_fast_install
+with_aix_soname
+with_gnu_ld
+with_sysroot
+enable_libtool_lock
+'
+      ac_precious_vars='build_alias
+host_alias
+target_alias
+CC
+CFLAGS
+LDFLAGS
+LIBS
+CPPFLAGS
+LT_SYS_LIBRARY_PATH'
+
+
+# Initialize some variables set by options.
+ac_init_help=
+ac_init_version=false
+ac_unrecognized_opts=
+ac_unrecognized_sep=
+# The variables have the same names as the options, with
+# dashes changed to underlines.
+cache_file=/dev/null
+exec_prefix=NONE
+no_create=
+no_recursion=
+prefix=NONE
+program_prefix=NONE
+program_suffix=NONE
+program_transform_name=s,x,x,
+silent=
+site=
+srcdir=
+verbose=
+x_includes=NONE
+x_libraries=NONE
+
+# Installation directory options.
+# These are left unexpanded so users can "make install exec_prefix=/foo"
+# and all the variables that are supposed to be based on exec_prefix
+# by default will actually change.
+# Use braces instead of parens because sh, perl, etc. also accept them.
+# (The list follows the same order as the GNU Coding Standards.)
+bindir='${exec_prefix}/bin' +sbindir='${exec_prefix}/sbin' +libexecdir='${exec_prefix}/libexec' +datarootdir='${prefix}/share' +datadir='${datarootdir}' +sysconfdir='${prefix}/etc' +sharedstatedir='${prefix}/com' +localstatedir='${prefix}/var' +runstatedir='${localstatedir}/run' +includedir='${prefix}/include' +oldincludedir='/usr/include' +docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' +infodir='${datarootdir}/info' +htmldir='${docdir}' +dvidir='${docdir}' +pdfdir='${docdir}' +psdir='${docdir}' +libdir='${exec_prefix}/lib' +localedir='${datarootdir}/locale' +mandir='${datarootdir}/man' + +ac_prev= +ac_dashdash= +for ac_option +do + # If the previous option needs an argument, assign it. + if test -n "$ac_prev"; then + eval $ac_prev=\$ac_option + ac_prev= + continue + fi + + case $ac_option in + *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; + *=) ac_optarg= ;; + *) ac_optarg=yes ;; + esac + + case $ac_dashdash$ac_option in + --) + ac_dashdash=yes ;; + + -bindir | --bindir | --bindi | --bind | --bin | --bi) + ac_prev=bindir ;; + -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) + bindir=$ac_optarg ;; + + -build | --build | --buil | --bui | --bu) + ac_prev=build_alias ;; + -build=* | --build=* | --buil=* | --bui=* | --bu=*) + build_alias=$ac_optarg ;; + + -cache-file | --cache-file | --cache-fil | --cache-fi \ + | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) + ac_prev=cache_file ;; + -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ + | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) + cache_file=$ac_optarg ;; + + --config-cache | -C) + cache_file=config.cache ;; + + -datadir | --datadir | --datadi | --datad) + ac_prev=datadir ;; + -datadir=* | --datadir=* | --datadi=* | --datad=*) + datadir=$ac_optarg ;; + + -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ + | --dataroo | --dataro | --datar) + ac_prev=datarootdir ;; + -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ + | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) + datarootdir=$ac_optarg ;; + + -disable-* | --disable-*) + ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid feature name: \`$ac_useropt'" + ac_useropt_orig=$ac_useropt + ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"enable_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval enable_$ac_useropt=no ;; + + -docdir | --docdir | --docdi | --doc | --do) + ac_prev=docdir ;; + -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) + docdir=$ac_optarg ;; + + -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) + ac_prev=dvidir ;; + -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) + dvidir=$ac_optarg ;; + + -enable-* | --enable-*) + ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? 
"invalid feature name: \`$ac_useropt'" + ac_useropt_orig=$ac_useropt + ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"enable_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval enable_$ac_useropt=\$ac_optarg ;; + + -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ + | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ + | --exec | --exe | --ex) + ac_prev=exec_prefix ;; + -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ + | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ + | --exec=* | --exe=* | --ex=*) + exec_prefix=$ac_optarg ;; + + -gas | --gas | --ga | --g) + # Obsolete; use --with-gas. + with_gas=yes ;; + + -help | --help | --hel | --he | -h) + ac_init_help=long ;; + -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) + ac_init_help=recursive ;; + -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) + ac_init_help=short ;; + + -host | --host | --hos | --ho) + ac_prev=host_alias ;; + -host=* | --host=* | --hos=* | --ho=*) + host_alias=$ac_optarg ;; + + -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) + ac_prev=htmldir ;; + -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ + | --ht=*) + htmldir=$ac_optarg ;; + + -includedir | --includedir | --includedi | --included | --include \ + | --includ | --inclu | --incl | --inc) + ac_prev=includedir ;; + -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ + | --includ=* | --inclu=* | --incl=* | --inc=*) + includedir=$ac_optarg ;; + + -infodir | --infodir | --infodi | --infod | --info | --inf) + ac_prev=infodir ;; + -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) + infodir=$ac_optarg ;; + + -libdir | --libdir | --libdi | --libd) + ac_prev=libdir ;; + -libdir=* | --libdir=* | --libdi=* | --libd=*) + libdir=$ac_optarg ;; + + -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ + | --libexe | --libex | --libe) + ac_prev=libexecdir ;; + -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ + | --libexe=* | --libex=* | --libe=*) + libexecdir=$ac_optarg ;; + + -localedir | --localedir | --localedi | --localed | --locale) + ac_prev=localedir ;; + -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) + localedir=$ac_optarg ;; + + -localstatedir | --localstatedir | --localstatedi | --localstated \ + | --localstate | --localstat | --localsta | --localst | --locals) + ac_prev=localstatedir ;; + -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ + | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) + localstatedir=$ac_optarg ;; + + -mandir | --mandir | --mandi | --mand | --man | --ma | --m) + ac_prev=mandir ;; + -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) + mandir=$ac_optarg ;; + + -nfp | --nfp | --nf) + # Obsolete; use --without-fp. 
+ with_fp=no ;; + + -no-create | --no-create | --no-creat | --no-crea | --no-cre \ + | --no-cr | --no-c | -n) + no_create=yes ;; + + -no-recursion | --no-recursion | --no-recursio | --no-recursi \ + | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) + no_recursion=yes ;; + + -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ + | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ + | --oldin | --oldi | --old | --ol | --o) + ac_prev=oldincludedir ;; + -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ + | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ + | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) + oldincludedir=$ac_optarg ;; + + -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) + ac_prev=prefix ;; + -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) + prefix=$ac_optarg ;; + + -program-prefix | --program-prefix | --program-prefi | --program-pref \ + | --program-pre | --program-pr | --program-p) + ac_prev=program_prefix ;; + -program-prefix=* | --program-prefix=* | --program-prefi=* \ + | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) + program_prefix=$ac_optarg ;; + + -program-suffix | --program-suffix | --program-suffi | --program-suff \ + | --program-suf | --program-su | --program-s) + ac_prev=program_suffix ;; + -program-suffix=* | --program-suffix=* | --program-suffi=* \ + | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) + program_suffix=$ac_optarg ;; + + -program-transform-name | --program-transform-name \ + | --program-transform-nam | --program-transform-na \ + | --program-transform-n | --program-transform- \ + | --program-transform | --program-transfor \ + | --program-transfo | --program-transf \ + | --program-trans | --program-tran \ + | --progr-tra | --program-tr | --program-t) + ac_prev=program_transform_name ;; + -program-transform-name=* | --program-transform-name=* \ + | --program-transform-nam=* | --program-transform-na=* \ + | --program-transform-n=* | --program-transform-=* \ + | --program-transform=* | --program-transfor=* \ + | --program-transfo=* | --program-transf=* \ + | --program-trans=* | --program-tran=* \ + | --progr-tra=* | --program-tr=* | --program-t=*) + program_transform_name=$ac_optarg ;; + + -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) + ac_prev=pdfdir ;; + -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) + pdfdir=$ac_optarg ;; + + -psdir | --psdir | --psdi | --psd | --ps) + ac_prev=psdir ;; + -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) + psdir=$ac_optarg ;; + + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + silent=yes ;; + + -runstatedir | --runstatedir | --runstatedi | --runstated \ + | --runstate | --runstat | --runsta | --runst | --runs \ + | --run | --ru | --r) + ac_prev=runstatedir ;; + -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \ + | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \ + | --run=* | --ru=* | --r=*) + runstatedir=$ac_optarg ;; + + -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) + ac_prev=sbindir ;; + -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ + | --sbi=* | --sb=*) + sbindir=$ac_optarg ;; + + -sharedstatedir | --sharedstatedir | --sharedstatedi \ + | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ + | --sharedst | --shareds | --shared | --share | --shar \ + | --sha | --sh) + 
ac_prev=sharedstatedir ;; + -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ + | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ + | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ + | --sha=* | --sh=*) + sharedstatedir=$ac_optarg ;; + + -site | --site | --sit) + ac_prev=site ;; + -site=* | --site=* | --sit=*) + site=$ac_optarg ;; + + -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) + ac_prev=srcdir ;; + -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) + srcdir=$ac_optarg ;; + + -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ + | --syscon | --sysco | --sysc | --sys | --sy) + ac_prev=sysconfdir ;; + -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ + | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) + sysconfdir=$ac_optarg ;; + + -target | --target | --targe | --targ | --tar | --ta | --t) + ac_prev=target_alias ;; + -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) + target_alias=$ac_optarg ;; + + -v | -verbose | --verbose | --verbos | --verbo | --verb) + verbose=yes ;; + + -version | --version | --versio | --versi | --vers | -V) + ac_init_version=: ;; + + -with-* | --with-*) + ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid package name: \`$ac_useropt'" + ac_useropt_orig=$ac_useropt + ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"with_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval with_$ac_useropt=\$ac_optarg ;; + + -without-* | --without-*) + ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid package name: \`$ac_useropt'" + ac_useropt_orig=$ac_useropt + ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"with_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval with_$ac_useropt=no ;; + + --x) + # Obsolete; use --with-x. + with_x=yes ;; + + -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ + | --x-incl | --x-inc | --x-in | --x-i) + ac_prev=x_includes ;; + -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ + | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) + x_includes=$ac_optarg ;; + + -x-libraries | --x-libraries | --x-librarie | --x-librari \ + | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) + ac_prev=x_libraries ;; + -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ + | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) + x_libraries=$ac_optarg ;; + + -*) as_fn_error $? "unrecognized option: \`$ac_option' +Try \`$0 --help' for more information" + ;; + + *=*) + ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` + # Reject names that are not valid shell variable names. + case $ac_envvar in #( + '' | [0-9]* | *[!_$as_cr_alnum]* ) + as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; + esac + eval $ac_envvar=\$ac_optarg + export $ac_envvar ;; + + *) + # FIXME: should be removed in autoconf 3.0. 
+ printf "%s\n" "$as_me: WARNING: you should use --build, --host, --target" >&2 + expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && + printf "%s\n" "$as_me: WARNING: invalid host type: $ac_option" >&2 + : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" + ;; + + esac +done + +if test -n "$ac_prev"; then + ac_option=--`echo $ac_prev | sed 's/_/-/g'` + as_fn_error $? "missing argument to $ac_option" +fi + +if test -n "$ac_unrecognized_opts"; then + case $enable_option_checking in + no) ;; + fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; + *) printf "%s\n" "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; + esac +fi + +# Check all directory arguments for consistency. +for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ + datadir sysconfdir sharedstatedir localstatedir includedir \ + oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ + libdir localedir mandir runstatedir +do + eval ac_val=\$$ac_var + # Remove trailing slashes. + case $ac_val in + */ ) + ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` + eval $ac_var=\$ac_val;; + esac + # Be sure to have absolute directory names. + case $ac_val in + [\\/$]* | ?:[\\/]* ) continue;; + NONE | '' ) case $ac_var in *prefix ) continue;; esac;; + esac + as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" +done + +# There might be people who depend on the old broken behavior: `$host' +# used to hold the argument of --host etc. +# FIXME: To remove some day. +build=$build_alias +host=$host_alias +target=$target_alias + +# FIXME: To remove some day. +if test "x$host_alias" != x; then + if test "x$build_alias" = x; then + cross_compiling=maybe + elif test "x$build_alias" != "x$host_alias"; then + cross_compiling=yes + fi +fi + +ac_tool_prefix= +test -n "$host_alias" && ac_tool_prefix=$host_alias- + +test "$silent" = yes && exec 6>/dev/null + + +ac_pwd=`pwd` && test -n "$ac_pwd" && +ac_ls_di=`ls -di .` && +ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || + as_fn_error $? "working directory cannot be determined" +test "X$ac_ls_di" = "X$ac_pwd_ls_di" || + as_fn_error $? "pwd does not report name of working directory" + + +# Find the source files, if location was not specified. +if test -z "$srcdir"; then + ac_srcdir_defaulted=yes + # Try the directory containing this script, then the parent directory. + ac_confdir=`$as_dirname -- "$as_myself" || +$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_myself" : 'X\(//\)[^/]' \| \ + X"$as_myself" : 'X\(//\)$' \| \ + X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || +printf "%s\n" X"$as_myself" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + srcdir=$ac_confdir + if test ! -r "$srcdir/$ac_unique_file"; then + srcdir=.. + fi +else + ac_srcdir_defaulted=no +fi +if test ! -r "$srcdir/$ac_unique_file"; then + test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." + as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir" +fi +ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" +ac_abs_confdir=`( + cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" + pwd)` +# When building in place, set srcdir=. +if test "$ac_abs_confdir" = "$ac_pwd"; then + srcdir=. +fi +# Remove unnecessary trailing slashes from srcdir. 
+# Double slashes in file names in object file debugging info +# mess up M-x gdb in Emacs. +case $srcdir in +*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; +esac +for ac_var in $ac_precious_vars; do + eval ac_env_${ac_var}_set=\${${ac_var}+set} + eval ac_env_${ac_var}_value=\$${ac_var} + eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} + eval ac_cv_env_${ac_var}_value=\$${ac_var} +done + +# +# Report the --help message. +# +if test "$ac_init_help" = "long"; then + # Omit some internal or obsolete options to make the list less imposing. + # This message is too long to be a string in the A/UX 3.1 sh. + cat <<_ACEOF +\`configure' configures libenet 1.3.18 to adapt to many kinds of systems. + +Usage: $0 [OPTION]... [VAR=VALUE]... + +To assign environment variables (e.g., CC, CFLAGS...), specify them as +VAR=VALUE. See below for descriptions of some of the useful variables. + +Defaults for the options are specified in brackets. + +Configuration: + -h, --help display this help and exit + --help=short display options specific to this package + --help=recursive display the short help of all the included packages + -V, --version display version information and exit + -q, --quiet, --silent do not print \`checking ...' messages + --cache-file=FILE cache test results in FILE [disabled] + -C, --config-cache alias for \`--cache-file=config.cache' + -n, --no-create do not create output files + --srcdir=DIR find the sources in DIR [configure dir or \`..'] + +Installation directories: + --prefix=PREFIX install architecture-independent files in PREFIX + [$ac_default_prefix] + --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX + [PREFIX] + +By default, \`make install' will install all the files in +\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify +an installation prefix other than \`$ac_default_prefix' using \`--prefix', +for instance \`--prefix=\$HOME'. + +For better control, use the options below. 
+
+Fine tuning of the installation directories:
+  --bindir=DIR            user executables [EPREFIX/bin]
+  --sbindir=DIR           system admin executables [EPREFIX/sbin]
+  --libexecdir=DIR        program executables [EPREFIX/libexec]
+  --sysconfdir=DIR        read-only single-machine data [PREFIX/etc]
+  --sharedstatedir=DIR    modifiable architecture-independent data [PREFIX/com]
+  --localstatedir=DIR     modifiable single-machine data [PREFIX/var]
+  --runstatedir=DIR       modifiable per-process data [LOCALSTATEDIR/run]
+  --libdir=DIR            object code libraries [EPREFIX/lib]
+  --includedir=DIR        C header files [PREFIX/include]
+  --oldincludedir=DIR     C header files for non-gcc [/usr/include]
+  --datarootdir=DIR       read-only arch.-independent data root [PREFIX/share]
+  --datadir=DIR           read-only architecture-independent data [DATAROOTDIR]
+  --infodir=DIR           info documentation [DATAROOTDIR/info]
+  --localedir=DIR         locale-dependent data [DATAROOTDIR/locale]
+  --mandir=DIR            man documentation [DATAROOTDIR/man]
+  --docdir=DIR            documentation root [DATAROOTDIR/doc/libenet]
+  --htmldir=DIR           html documentation [DOCDIR]
+  --dvidir=DIR            dvi documentation [DOCDIR]
+  --pdfdir=DIR            pdf documentation [DOCDIR]
+  --psdir=DIR             ps documentation [DOCDIR]
+_ACEOF
+
+  cat <<\_ACEOF
+
+Program names:
+  --program-prefix=PREFIX            prepend PREFIX to installed program names
+  --program-suffix=SUFFIX            append SUFFIX to installed program names
+  --program-transform-name=PROGRAM   run sed PROGRAM on installed program names
+
+System types:
+  --build=BUILD     configure for building on BUILD [guessed]
+  --host=HOST       cross-compile to build programs to run on HOST [BUILD]
+_ACEOF
+fi
+
+if test -n "$ac_init_help"; then
+  case $ac_init_help in
+     short | recursive ) echo "Configuration of libenet 1.3.18:";;
+   esac
+  cat <<\_ACEOF
+
+Optional Features:
+  --disable-option-checking  ignore unrecognized --enable/--with options
+  --disable-FEATURE       do not include FEATURE (same as --enable-FEATURE=no)
+  --enable-FEATURE[=ARG]  include FEATURE [ARG=yes]
+  --enable-silent-rules   less verbose build output (undo: "make V=1")
+  --disable-silent-rules  verbose build output (undo: "make V=0")
+  --enable-dependency-tracking
+                          do not reject slow dependency extractors
+  --disable-dependency-tracking
+                          speeds up one-time build
+  --enable-shared[=PKGS]  build shared libraries [default=yes]
+  --enable-static[=PKGS]  build static libraries [default=yes]
+  --enable-fast-install[=PKGS]
+                          optimize for fast installation [default=yes]
+  --disable-libtool-lock  avoid locking (might break parallel builds)
+
+Optional Packages:
+  --with-PACKAGE[=ARG]    use PACKAGE [ARG=yes]
+  --without-PACKAGE       do not use PACKAGE (same as --with-PACKAGE=no)
+  --with-pic[=PKGS]       try to use only PIC/non-PIC objects [default=use
+                          both]
+  --with-aix-soname=aix|svr4|both
+                          shared library versioning (aka "SONAME") variant to
+                          provide on AIX, [default=aix].
+  --with-gnu-ld           assume the C compiler uses GNU ld [default=no]
+  --with-sysroot[=DIR]    Search for dependent libraries within DIR (or the
+                          compiler's sysroot if not specified).
+
+Some influential environment variables:
+  CC          C compiler command
+  CFLAGS      C compiler flags
+  LDFLAGS     linker flags, e.g. -L<lib dir> if you have libraries in a
+              nonstandard directory <lib dir>
+  LIBS        libraries to pass to the linker, e.g. -l<library>
+  CPPFLAGS    (Objective) C/C++ preprocessor flags, e.g. -I<include dir> if
+              you have headers in a nonstandard directory <include dir>
+  LT_SYS_LIBRARY_PATH
+              User-defined run-time library search path.
+
+Use these variables to override the choices made by `configure' or to help
+it to find libraries and programs with nonstandard names/locations.
+ +Report bugs to the package provider. +_ACEOF +ac_status=$? +fi + +if test "$ac_init_help" = "recursive"; then + # If there are subdirs, report their specific --help. + for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue + test -d "$ac_dir" || + { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || + continue + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`printf "%s\n" "$ac_dir" | sed 's|^\.[\\/]||'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`printf "%s\n" "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. + ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + cd "$ac_dir" || { ac_status=$?; continue; } + # Check for configure.gnu first; this name is used for a wrapper for + # Metaconfig's "Configure" on case-insensitive file systems. + if test -f "$ac_srcdir/configure.gnu"; then + echo && + $SHELL "$ac_srcdir/configure.gnu" --help=recursive + elif test -f "$ac_srcdir/configure"; then + echo && + $SHELL "$ac_srcdir/configure" --help=recursive + else + printf "%s\n" "$as_me: WARNING: no configuration information is in $ac_dir" >&2 + fi || ac_status=$? + cd "$ac_pwd" || { ac_status=$?; break; } + done +fi + +test -n "$ac_init_help" && exit $ac_status +if $ac_init_version; then + cat <<\_ACEOF +libenet configure 1.3.18 +generated by GNU Autoconf 2.71 + +Copyright (C) 2021 Free Software Foundation, Inc. +This configure script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it. +_ACEOF + exit +fi + +## ------------------------ ## +## Autoconf initialization. ## +## ------------------------ ## + +# ac_fn_c_try_compile LINENO +# -------------------------- +# Try to compile conftest.$ac_ext, and return whether this succeeded. +ac_fn_c_try_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext conftest.beam + if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +printf "%s\n" "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest.$ac_objext +then : + ac_retval=0 +else $as_nop + printf "%s\n" "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_compile + +# ac_fn_c_try_link LINENO +# ----------------------- +# Try to link conftest.$ac_ext, and return whether this succeeded. +ac_fn_c_try_link () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext conftest.beam conftest$ac_exeext + if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +printf "%s\n" "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + test -x conftest$ac_exeext + } +then : + ac_retval=0 +else $as_nop + printf "%s\n" "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information + # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would + # interfere with the next link command; also delete a directory that is + # left behind by Apple's compiler. We do this before executing the actions. + rm -rf conftest.dSYM conftest_ipa8_conftest.oo + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_link + +# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES +# ------------------------------------------------------- +# Tests whether HEADER exists and can be compiled using the include files in +# INCLUDES, setting the cache variable VAR accordingly. +ac_fn_c_check_header_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +printf %s "checking for $2... " >&6; } +if eval test \${$3+y} +then : + printf %s "(cached) " >&6 +else $as_nop + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +#include <$2> +_ACEOF +if ac_fn_c_try_compile "$LINENO" +then : + eval "$3=yes" +else $as_nop + eval "$3=no" +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext +fi +eval ac_res=\$$3 + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +printf "%s\n" "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_header_compile + +# ac_fn_c_check_func LINENO FUNC VAR +# ---------------------------------- +# Tests whether FUNC exists, setting the cache variable VAR accordingly +ac_fn_c_check_func () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +printf %s "checking for $2... " >&6; } +if eval test \${$3+y} +then : + printf %s "(cached) " >&6 +else $as_nop + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +/* Define $2 to an innocuous variant, in case declares $2. + For example, HP-UX 11i declares gettimeofday. 
+*/
+#define $2 innocuous_$2
+
+/* System header to define __stub macros and hopefully few prototypes,
+   which can conflict with char $2 (); below.  */
+
+#include <limits.h>
+#undef $2
+
+/* Override any GCC internal prototype to avoid an error.
+   Use char because int might match the return type of a GCC
+   builtin and then its argument prototype would still apply.  */
+#ifdef __cplusplus
+extern "C"
+#endif
+char $2 ();
+/* The GNU C library defines this for functions which it implements
+    to always fail with ENOSYS.  Some functions are actually named
+    something starting with __ and the normal name is an alias.  */
+#if defined __stub_$2 || defined __stub___$2
+choke me
+#endif
+
+int
+main (void)
+{
+return $2 ();
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"
+then :
+  eval "$3=yes"
+else $as_nop
+  eval "$3=no"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam \
+    conftest$ac_exeext conftest.$ac_ext
+fi
+eval ac_res=\$$3
+       { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+printf "%s\n" "$ac_res" >&6; }
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_check_func
+
+# ac_fn_c_check_member LINENO AGGR MEMBER VAR INCLUDES
+# ----------------------------------------------------
+# Tries to find if the field MEMBER exists in type AGGR, after including
+# INCLUDES, setting cache variable VAR accordingly.
+ac_fn_c_check_member ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $2.$3" >&5
+printf %s "checking for $2.$3... " >&6; }
+if eval test \${$4+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+$5
+int
+main (void)
+{
+static $2 ac_aggr;
+if (ac_aggr.$3)
+return 0;
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"
+then :
+  eval "$4=yes"
+else $as_nop
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+$5
+int
+main (void)
+{
+static $2 ac_aggr;
+if (sizeof ac_aggr.$3)
+return 0;
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"
+then :
+  eval "$4=yes"
+else $as_nop
+  eval "$4=no"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+eval ac_res=\$$4
+       { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+printf "%s\n" "$ac_res" >&6; }
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_check_member
+
+# ac_fn_c_check_type LINENO TYPE VAR INCLUDES
+# -------------------------------------------
+# Tests whether TYPE exists after having included INCLUDES, setting cache
+# variable VAR accordingly.
+ac_fn_c_check_type ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+printf %s "checking for $2... " >&6; }
+if eval test \${$3+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  eval "$3=no"
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+$4
+int
+main (void)
+{
+if (sizeof ($2))
+	 return 0;
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"
+then :
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.
*/ +$4 +int +main (void) +{ +if (sizeof (($2))) + return 0; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO" +then : + +else $as_nop + eval "$3=yes" +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext +fi +eval ac_res=\$$3 + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +printf "%s\n" "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_type +ac_configure_args_raw= +for ac_arg +do + case $ac_arg in + *\'*) + ac_arg=`printf "%s\n" "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + as_fn_append ac_configure_args_raw " '$ac_arg'" +done + +case $ac_configure_args_raw in + *$as_nl*) + ac_safe_unquote= ;; + *) + ac_unsafe_z='|&;<>()$`\\"*?[ '' ' # This string ends in space, tab. + ac_unsafe_a="$ac_unsafe_z#~" + ac_safe_unquote="s/ '\\([^$ac_unsafe_a][^$ac_unsafe_z]*\\)'/ \\1/g" + ac_configure_args_raw=` printf "%s\n" "$ac_configure_args_raw" | sed "$ac_safe_unquote"`;; +esac + +cat >config.log <<_ACEOF +This file contains any messages produced by compilers while +running configure, to aid debugging if configure makes a mistake. + +It was created by libenet $as_me 1.3.18, which was +generated by GNU Autoconf 2.71. Invocation command line was + + $ $0$ac_configure_args_raw + +_ACEOF +exec 5>>config.log +{ +cat <<_ASUNAME +## --------- ## +## Platform. ## +## --------- ## + +hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` +uname -m = `(uname -m) 2>/dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` + +/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` +/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` +/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` +/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` + +_ASUNAME + +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + printf "%s\n" "PATH: $as_dir" + done +IFS=$as_save_IFS + +} >&5 + +cat >&5 <<_ACEOF + + +## ----------- ## +## Core tests. ## +## ----------- ## + +_ACEOF + + +# Keep a trace of the command line. +# Strip out --no-create and --no-recursion so they do not pile up. +# Strip out --silent because we don't want to record it for future runs. +# Also quote any args containing shell meta-characters. +# Make two passes to allow for proper duplicate-argument suppression. 
+ac_configure_args= +ac_configure_args0= +ac_configure_args1= +ac_must_keep_next=false +for ac_pass in 1 2 +do + for ac_arg + do + case $ac_arg in + -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + continue ;; + *\'*) + ac_arg=`printf "%s\n" "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + case $ac_pass in + 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; + 2) + as_fn_append ac_configure_args1 " '$ac_arg'" + if test $ac_must_keep_next = true; then + ac_must_keep_next=false # Got value, back to normal. + else + case $ac_arg in + *=* | --config-cache | -C | -disable-* | --disable-* \ + | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ + | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ + | -with-* | --with-* | -without-* | --without-* | --x) + case "$ac_configure_args0 " in + "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; + esac + ;; + -* ) ac_must_keep_next=true ;; + esac + fi + as_fn_append ac_configure_args " '$ac_arg'" + ;; + esac + done +done +{ ac_configure_args0=; unset ac_configure_args0;} +{ ac_configure_args1=; unset ac_configure_args1;} + +# When interrupted or exit'd, cleanup temporary files, and complete +# config.log. We remove comments because anyway the quotes in there +# would cause problems or look ugly. +# WARNING: Use '\'' to represent an apostrophe within the trap. +# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. +trap 'exit_status=$? + # Sanitize IFS. + IFS=" "" $as_nl" + # Save into config.log some information that might help in debugging. + { + echo + + printf "%s\n" "## ---------------- ## +## Cache variables. ## +## ---------------- ##" + echo + # The following way of writing the cache mishandles newlines in values, +( + for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 +printf "%s\n" "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( + *) { eval $ac_var=; unset $ac_var;} ;; + esac ;; + esac + done + (set) 2>&1 | + case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + sed -n \ + "s/'\''/'\''\\\\'\'''\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" + ;; #( + *) + sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) + echo + + printf "%s\n" "## ----------------- ## +## Output variables. ## +## ----------------- ##" + echo + for ac_var in $ac_subst_vars + do + eval ac_val=\$$ac_var + case $ac_val in + *\'\''*) ac_val=`printf "%s\n" "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac + printf "%s\n" "$ac_var='\''$ac_val'\''" + done | sort + echo + + if test -n "$ac_subst_files"; then + printf "%s\n" "## ------------------- ## +## File substitutions. ## +## ------------------- ##" + echo + for ac_var in $ac_subst_files + do + eval ac_val=\$$ac_var + case $ac_val in + *\'\''*) ac_val=`printf "%s\n" "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac + printf "%s\n" "$ac_var='\''$ac_val'\''" + done | sort + echo + fi + + if test -s confdefs.h; then + printf "%s\n" "## ----------- ## +## confdefs.h. 
+##
+## ----------- ##"
+    echo
+    cat confdefs.h
+    echo
+  fi
+  test "$ac_signal" != 0 &&
+    printf "%s\n" "$as_me: caught signal $ac_signal"
+  printf "%s\n" "$as_me: exit $exit_status"
+  } >&5
+  rm -f core *.core core.conftest.* &&
+    rm -f -r conftest* confdefs* conf$$* $ac_clean_files &&
+    exit $exit_status
+' 0
+for ac_signal in 1 2 13 15; do
+  trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal
+done
+ac_signal=0
+
+# confdefs.h avoids OS command line length limits that DEFS can exceed.
+rm -f -r conftest* confdefs.h
+
+printf "%s\n" "/* confdefs.h */" > confdefs.h
+
+# Predefined preprocessor variables.
+
+printf "%s\n" "#define PACKAGE_NAME \"$PACKAGE_NAME\"" >>confdefs.h
+
+printf "%s\n" "#define PACKAGE_TARNAME \"$PACKAGE_TARNAME\"" >>confdefs.h
+
+printf "%s\n" "#define PACKAGE_VERSION \"$PACKAGE_VERSION\"" >>confdefs.h
+
+printf "%s\n" "#define PACKAGE_STRING \"$PACKAGE_STRING\"" >>confdefs.h
+
+printf "%s\n" "#define PACKAGE_BUGREPORT \"$PACKAGE_BUGREPORT\"" >>confdefs.h
+
+printf "%s\n" "#define PACKAGE_URL \"$PACKAGE_URL\"" >>confdefs.h
+
+
+# Let the site file select an alternate cache file if it wants to.
+# Prefer an explicitly selected file to automatically selected ones.
+if test -n "$CONFIG_SITE"; then
+  ac_site_files="$CONFIG_SITE"
+elif test "x$prefix" != xNONE; then
+  ac_site_files="$prefix/share/config.site $prefix/etc/config.site"
+else
+  ac_site_files="$ac_default_prefix/share/config.site $ac_default_prefix/etc/config.site"
+fi
+
+for ac_site_file in $ac_site_files
+do
+  case $ac_site_file in #(
+  */*) :
+     ;; #(
+  *) :
+     ac_site_file=./$ac_site_file ;;
+esac
+  if test -f "$ac_site_file" && test -r "$ac_site_file"; then
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5
+printf "%s\n" "$as_me: loading site script $ac_site_file" >&6;}
+    sed 's/^/| /' "$ac_site_file" >&5
+    . "$ac_site_file" \
+      || { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "failed to load site script $ac_site_file
+See \`config.log' for more details" "$LINENO" 5; }
+  fi
+done
+
+if test -r "$cache_file"; then
+  # Some versions of bash will fail to source /dev/null (special files
+  # actually), so we avoid doing that.  DJGPP emulates it as a regular file.
+  if test /dev/null != "$cache_file" && test -f "$cache_file"; then
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5
+printf "%s\n" "$as_me: loading cache $cache_file" >&6;}
+    case $cache_file in
+      [\\/]* | ?:[\\/]* ) . "$cache_file";;
+      *)                      . "./$cache_file";;
+    esac
+  fi
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5
+printf "%s\n" "$as_me: creating cache $cache_file" >&6;}
+  >$cache_file
+fi
+
+# Test code for whether the C compiler supports C89 (global declarations)
+ac_c_conftest_c89_globals='
+/* Does the compiler advertise C89 conformance?
+   Do not test the value of __STDC__, because some compilers set it to 0
+   while being otherwise adequately conformant. */
+#if !defined __STDC__
+# error "Compiler does not advertise C89 conformance"
+#endif
+
+#include <stddef.h>
+#include <stdarg.h>
+struct stat;
+/* Most of the following tests are stolen from RCS 5.7 src/conf.sh.  */
+struct buf { int x; };
+struct buf * (*rcsopen) (struct buf *, struct stat *, int);
+static char *e (p, i)
+     char **p;
+     int i;
+{
+  return p[i];
+}
+static char *f (char * (*g) (char **, int), char **p, ...)
+{
+  char *s;
+  va_list v;
+  va_start (v,p);
+  s = g (p, va_arg (v,int));
+  va_end (v);
+  return s;
+}
+
+/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default.  It has
+   function prototypes and stuff, but not \xHH hex character constants.
+   These do not provoke an error unfortunately, instead are silently treated
+   as an "x".  The following induces an error, until -std is added to get
+   proper ANSI mode.  Curiously \x00 != x always comes out true, for an
+   array size at least.  It is necessary to write \x00 == 0 to get something
+   that is true only with -std.  */
+int osf4_cc_array ['\''\x00'\'' == 0 ? 1 : -1];
+
+/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters
+   inside strings and character constants.  */
+#define FOO(x) '\''x'\''
+int xlc6_cc_array[FOO(a) == '\''x'\'' ? 1 : -1];
+
+int test (int i, double x);
+struct s1 {int (*f) (int a);};
+struct s2 {int (*f) (double a);};
+int pairnames (int, char **, int *(*)(struct buf *, struct stat *, int),
+               int, int);'
+
+# Test code for whether the C compiler supports C89 (body of main).
+ac_c_conftest_c89_main='
+ok |= (argc == 0 || f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]);
+'
+
+# Test code for whether the C compiler supports C99 (global declarations)
+ac_c_conftest_c99_globals='
+// Does the compiler advertise C99 conformance?
+#if !defined __STDC_VERSION__ || __STDC_VERSION__ < 199901L
+# error "Compiler does not advertise C99 conformance"
+#endif
+
+#include <stdbool.h>
+extern int puts (const char *);
+extern int printf (const char *, ...);
+extern int dprintf (int, const char *, ...);
+extern void *malloc (size_t);
+
+// Check varargs macros.  These examples are taken from C99 6.10.3.5.
+// dprintf is used instead of fprintf to avoid needing to declare
+// FILE and stderr.
+#define debug(...) dprintf (2, __VA_ARGS__)
+#define showlist(...) puts (#__VA_ARGS__)
+#define report(test,...) ((test) ? puts (#test) : printf (__VA_ARGS__))
+static void
+test_varargs_macros (void)
+{
+  int x = 1234;
+  int y = 5678;
+  debug ("Flag");
+  debug ("X = %d\n", x);
+  showlist (The first, second, and third items.);
+  report (x>y, "x is %d but y is %d", x, y);
+}
+
+// Check long long types.
+#define BIG64 18446744073709551615ull
+#define BIG32 4294967295ul
+#define BIG_OK (BIG64 / BIG32 == 4294967297ull && BIG64 % BIG32 == 0)
+#if !BIG_OK
+  #error "your preprocessor is broken"
+#endif
+#if BIG_OK
+#else
+  #error "your preprocessor is broken"
+#endif
+static long long int bignum = -9223372036854775807LL;
+static unsigned long long int ubignum = BIG64;
+
+struct incomplete_array
+{
+  int datasize;
+  double data[];
+};
+
+struct named_init {
+  int number;
+  const wchar_t *name;
+  double average;
+};
+
+typedef const char *ccp;
+
+static inline int
+test_restrict (ccp restrict text)
+{
+  // See if C++-style comments work.
+  // Iterate through items via the restricted pointer.
+  // Also check for declarations in for loops.
+  for (unsigned int i = 0; *(text+i) != '\''\0'\''; ++i)
+    continue;
+  return 0;
+}
+
+// Check varargs and va_copy.
+static bool
+test_varargs (const char *format, ...)
+{ + va_list args; + va_start (args, format); + va_list args_copy; + va_copy (args_copy, args); + + const char *str = ""; + int number = 0; + float fnumber = 0; + + while (*format) + { + switch (*format++) + { + case '\''s'\'': // string + str = va_arg (args_copy, const char *); + break; + case '\''d'\'': // int + number = va_arg (args_copy, int); + break; + case '\''f'\'': // float + fnumber = va_arg (args_copy, double); + break; + default: + break; + } + } + va_end (args_copy); + va_end (args); + + return *str && number && fnumber; +} +' + +# Test code for whether the C compiler supports C99 (body of main). +ac_c_conftest_c99_main=' + // Check bool. + _Bool success = false; + success |= (argc != 0); + + // Check restrict. + if (test_restrict ("String literal") == 0) + success = true; + char *restrict newvar = "Another string"; + + // Check varargs. + success &= test_varargs ("s, d'\'' f .", "string", 65, 34.234); + test_varargs_macros (); + + // Check flexible array members. + struct incomplete_array *ia = + malloc (sizeof (struct incomplete_array) + (sizeof (double) * 10)); + ia->datasize = 10; + for (int i = 0; i < ia->datasize; ++i) + ia->data[i] = i * 1.234; + + // Check named initializers. + struct named_init ni = { + .number = 34, + .name = L"Test wide string", + .average = 543.34343, + }; + + ni.number = 58; + + int dynamic_array[ni.number]; + dynamic_array[0] = argv[0][0]; + dynamic_array[ni.number - 1] = 543; + + // work around unused variable warnings + ok |= (!success || bignum == 0LL || ubignum == 0uLL || newvar[0] == '\''x'\'' + || dynamic_array[ni.number - 1] != 543); +' + +# Test code for whether the C compiler supports C11 (global declarations) +ac_c_conftest_c11_globals=' +// Does the compiler advertise C11 conformance? +#if !defined __STDC_VERSION__ || __STDC_VERSION__ < 201112L +# error "Compiler does not advertise C11 conformance" +#endif + +// Check _Alignas. +char _Alignas (double) aligned_as_double; +char _Alignas (0) no_special_alignment; +extern char aligned_as_int; +char _Alignas (0) _Alignas (int) aligned_as_int; + +// Check _Alignof. +enum +{ + int_alignment = _Alignof (int), + int_array_alignment = _Alignof (int[100]), + char_alignment = _Alignof (char) +}; +_Static_assert (0 < -_Alignof (int), "_Alignof is signed"); + +// Check _Noreturn. +int _Noreturn does_not_return (void) { for (;;) continue; } + +// Check _Static_assert. +struct test_static_assert +{ + int x; + _Static_assert (sizeof (int) <= sizeof (long int), + "_Static_assert does not work in struct"); + long int y; +}; + +// Check UTF-8 literals. +#define u8 syntax error! +char const utf8_literal[] = u8"happens to be ASCII" "another string"; + +// Check duplicate typedefs. +typedef long *long_ptr; +typedef long int *long_ptr; +typedef long_ptr long_ptr; + +// Anonymous structures and unions -- taken from C11 6.7.2.1 Example 1. +struct anonymous +{ + union { + struct { int i; int j; }; + struct { int k; long int l; } w; + }; + int m; +} v1; +' + +# Test code for whether the C compiler supports C11 (body of main). +ac_c_conftest_c11_main=' + _Static_assert ((offsetof (struct anonymous, i) + == offsetof (struct anonymous, w.k)), + "Anonymous union alignment botch"); + v1.i = 2; + v1.w.k = 5; + ok |= v1.i != 5; +' + +# Test code for whether the C compiler supports C11 (complete). 
+ac_c_conftest_c11_program="${ac_c_conftest_c89_globals} +${ac_c_conftest_c99_globals} +${ac_c_conftest_c11_globals} + +int +main (int argc, char **argv) +{ + int ok = 0; + ${ac_c_conftest_c89_main} + ${ac_c_conftest_c99_main} + ${ac_c_conftest_c11_main} + return ok; +} +" + +# Test code for whether the C compiler supports C99 (complete). +ac_c_conftest_c99_program="${ac_c_conftest_c89_globals} +${ac_c_conftest_c99_globals} + +int +main (int argc, char **argv) +{ + int ok = 0; + ${ac_c_conftest_c89_main} + ${ac_c_conftest_c99_main} + return ok; +} +" + +# Test code for whether the C compiler supports C89 (complete). +ac_c_conftest_c89_program="${ac_c_conftest_c89_globals} + +int +main (int argc, char **argv) +{ + int ok = 0; + ${ac_c_conftest_c89_main} + return ok; +} +" + +as_fn_append ac_header_c_list " stdio.h stdio_h HAVE_STDIO_H" +as_fn_append ac_header_c_list " stdlib.h stdlib_h HAVE_STDLIB_H" +as_fn_append ac_header_c_list " string.h string_h HAVE_STRING_H" +as_fn_append ac_header_c_list " inttypes.h inttypes_h HAVE_INTTYPES_H" +as_fn_append ac_header_c_list " stdint.h stdint_h HAVE_STDINT_H" +as_fn_append ac_header_c_list " strings.h strings_h HAVE_STRINGS_H" +as_fn_append ac_header_c_list " sys/stat.h sys_stat_h HAVE_SYS_STAT_H" +as_fn_append ac_header_c_list " sys/types.h sys_types_h HAVE_SYS_TYPES_H" +as_fn_append ac_header_c_list " unistd.h unistd_h HAVE_UNISTD_H" + +# Auxiliary files required by this configure script. +ac_aux_files="config.guess config.sub ltmain.sh compile missing install-sh" + +# Locations in which to look for auxiliary files. +ac_aux_dir_candidates="${srcdir}${PATH_SEPARATOR}${srcdir}/..${PATH_SEPARATOR}${srcdir}/../.." + +# Search for a directory containing all of the required auxiliary files, +# $ac_aux_files, from the $PATH-style list $ac_aux_dir_candidates. +# If we don't find one directory that contains all the files we need, +# we report the set of missing files from the *first* directory in +# $ac_aux_dir_candidates and give up. +ac_missing_aux_files="" +ac_first_candidate=: +printf "%s\n" "$as_me:${as_lineno-$LINENO}: looking for aux files: $ac_aux_files" >&5 +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +as_found=false +for as_dir in $ac_aux_dir_candidates +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + as_found=: + + printf "%s\n" "$as_me:${as_lineno-$LINENO}: trying $as_dir" >&5 + ac_aux_dir_found=yes + ac_install_sh= + for ac_aux in $ac_aux_files + do + # As a special case, if "install-sh" is required, that requirement + # can be satisfied by any of "install-sh", "install.sh", or "shtool", + # and $ac_install_sh is set appropriately for whichever one is found. 
+ if test x"$ac_aux" = x"install-sh" + then + if test -f "${as_dir}install-sh"; then + printf "%s\n" "$as_me:${as_lineno-$LINENO}: ${as_dir}install-sh found" >&5 + ac_install_sh="${as_dir}install-sh -c" + elif test -f "${as_dir}install.sh"; then + printf "%s\n" "$as_me:${as_lineno-$LINENO}: ${as_dir}install.sh found" >&5 + ac_install_sh="${as_dir}install.sh -c" + elif test -f "${as_dir}shtool"; then + printf "%s\n" "$as_me:${as_lineno-$LINENO}: ${as_dir}shtool found" >&5 + ac_install_sh="${as_dir}shtool install -c" + else + ac_aux_dir_found=no + if $ac_first_candidate; then + ac_missing_aux_files="${ac_missing_aux_files} install-sh" + else + break + fi + fi + else + if test -f "${as_dir}${ac_aux}"; then + printf "%s\n" "$as_me:${as_lineno-$LINENO}: ${as_dir}${ac_aux} found" >&5 + else + ac_aux_dir_found=no + if $ac_first_candidate; then + ac_missing_aux_files="${ac_missing_aux_files} ${ac_aux}" + else + break + fi + fi + fi + done + if test "$ac_aux_dir_found" = yes; then + ac_aux_dir="$as_dir" + break + fi + ac_first_candidate=false + + as_found=false +done +IFS=$as_save_IFS +if $as_found +then : + +else $as_nop + as_fn_error $? "cannot find required auxiliary files:$ac_missing_aux_files" "$LINENO" 5 +fi + + +# These three variables are undocumented and unsupported, +# and are intended to be withdrawn in a future Autoconf release. +# They can cause serious problems if a builder's source tree is in a directory +# whose full name contains unusual characters. +if test -f "${ac_aux_dir}config.guess"; then + ac_config_guess="$SHELL ${ac_aux_dir}config.guess" +fi +if test -f "${ac_aux_dir}config.sub"; then + ac_config_sub="$SHELL ${ac_aux_dir}config.sub" +fi +if test -f "$ac_aux_dir/configure"; then + ac_configure="$SHELL ${ac_aux_dir}configure" +fi + +# Check that the precious variables saved in the cache have kept the same +# value. +ac_cache_corrupted=false +for ac_var in $ac_precious_vars; do + eval ac_old_set=\$ac_cv_env_${ac_var}_set + eval ac_new_set=\$ac_env_${ac_var}_set + eval ac_old_val=\$ac_cv_env_${ac_var}_value + eval ac_new_val=\$ac_env_${ac_var}_value + case $ac_old_set,$ac_new_set in + set,) + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 +printf "%s\n" "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,set) + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 +printf "%s\n" "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,);; + *) + if test "x$ac_old_val" != "x$ac_new_val"; then + # differences in whitespace do not lead to failure. 
+ ac_old_val_w=`echo x $ac_old_val` + ac_new_val_w=`echo x $ac_new_val` + if test "$ac_old_val_w" != "$ac_new_val_w"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 +printf "%s\n" "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} + ac_cache_corrupted=: + else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 +printf "%s\n" "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} + eval $ac_var=\$ac_old_val + fi + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 +printf "%s\n" "$as_me: former value: \`$ac_old_val'" >&2;} + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 +printf "%s\n" "$as_me: current value: \`$ac_new_val'" >&2;} + fi;; + esac + # Pass precious variables to config.status. + if test "$ac_new_set" = set; then + case $ac_new_val in + *\'*) ac_arg=$ac_var=`printf "%s\n" "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; + *) ac_arg=$ac_var=$ac_new_val ;; + esac + case " $ac_configure_args " in + *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. + *) as_fn_append ac_configure_args " '$ac_arg'" ;; + esac + fi +done +if $ac_cache_corrupted; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 +printf "%s\n" "$as_me: error: changes in the environment can compromise the build" >&2;} + as_fn_error $? "run \`${MAKE-make} distclean' and/or \`rm $cache_file' + and start over" "$LINENO" 5 +fi +## -------------------- ## +## Main body of script. ## +## -------------------- ## + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + +am__api_version='1.16' + + + + # Find a good install program. We prefer a C program (faster), +# so one script is as good as another. But avoid the broken or +# incompatible versions: +# SysV /etc/install, /usr/sbin/install +# SunOS /usr/etc/install +# IRIX /sbin/install +# AIX /bin/install +# AmigaOS /C/install, which installs bootblocks on floppy discs +# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag +# AFS /usr/afsws/bin/install, which mishandles nonexistent args +# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" +# OS/2's system install, which has a completely different semantic +# ./install, which can be erroneously created by make from ./install.sh. +# Reject install programs that cannot install multiple files. +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 +printf %s "checking for a BSD-compatible install... " >&6; } +if test -z "$INSTALL"; then +if test ${ac_cv_path_install+y} +then : + printf %s "(cached) " >&6 +else $as_nop + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + # Account for fact that we put trailing slashes in our PATH walk. 
+case $as_dir in #(( + ./ | /[cC]/* | \ + /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ + ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \ + /usr/ucb/* ) ;; + *) + # OSF1 and SCO ODT 3.0 have their own names for install. + # Don't use installbsd from OSF since it installs stuff as root + # by default. + for ac_prog in ginstall scoinst install; do + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_prog$ac_exec_ext"; then + if test $ac_prog = install && + grep dspmsg "$as_dir$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # AIX install. It has an incompatible calling convention. + : + elif test $ac_prog = install && + grep pwplus "$as_dir$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # program-specific install script used by HP pwplus--don't use. + : + else + rm -rf conftest.one conftest.two conftest.dir + echo one > conftest.one + echo two > conftest.two + mkdir conftest.dir + if "$as_dir$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir/" && + test -s conftest.one && test -s conftest.two && + test -s conftest.dir/conftest.one && + test -s conftest.dir/conftest.two + then + ac_cv_path_install="$as_dir$ac_prog$ac_exec_ext -c" + break 3 + fi + fi + fi + done + done + ;; +esac + + done +IFS=$as_save_IFS + +rm -rf conftest.one conftest.two conftest.dir + +fi + if test ${ac_cv_path_install+y}; then + INSTALL=$ac_cv_path_install + else + # As a last resort, use the slow shell script. Don't cache a + # value for INSTALL within a source directory, because that will + # break other packages using the cache if that directory is + # removed, or if the value is a relative name. + INSTALL=$ac_install_sh + fi +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5 +printf "%s\n" "$INSTALL" >&6; } + +# Use test -z because SunOS4 sh mishandles braces in ${var-val}. +# It thinks the first close brace ends the variable substitution. +test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' + +test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' + +test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether build environment is sane" >&5 +printf %s "checking whether build environment is sane... " >&6; } +# Reject unsafe characters in $srcdir or the absolute working directory +# name. Accept space and tab only in the latter. +am_lf=' +' +case `pwd` in + *[\\\"\#\$\&\'\`$am_lf]*) + as_fn_error $? "unsafe absolute working directory name" "$LINENO" 5;; +esac +case $srcdir in + *[\\\"\#\$\&\'\`$am_lf\ \ ]*) + as_fn_error $? "unsafe srcdir value: '$srcdir'" "$LINENO" 5;; +esac + +# Do 'set' in a subshell so we don't clobber the current shell's +# arguments. Must try -L first in case configure is actually a +# symlink; some systems play weird games with the mod time of symlinks +# (eg FreeBSD returns the mod time of the symlink's containing +# directory). +if ( + am_has_slept=no + for am_try in 1 2; do + echo "timestamp, slept: $am_has_slept" > conftest.file + set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` + if test "$*" = "X"; then + # -L didn't work. + set X `ls -t "$srcdir/configure" conftest.file` + fi + if test "$*" != "X $srcdir/configure conftest.file" \ + && test "$*" != "X conftest.file $srcdir/configure"; then + + # If neither matched, then we have a broken ls. This can happen + # if, for instance, CONFIG_SHELL is bash and it inherits a + # broken ls alias from the environment. This has actually + # happened. 
Such a system could not be considered "sane". + as_fn_error $? "ls -t appears to fail. Make sure there is not a broken + alias in your environment" "$LINENO" 5 + fi + if test "$2" = conftest.file || test $am_try -eq 2; then + break + fi + # Just in case. + sleep 1 + am_has_slept=yes + done + test "$2" = conftest.file + ) +then + # Ok. + : +else + as_fn_error $? "newly created file is older than distributed files! +Check your system clock" "$LINENO" 5 +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +printf "%s\n" "yes" >&6; } +# If we didn't sleep, we still need to ensure time stamps of config.status and +# generated files are strictly newer. +am_sleep_pid= +if grep 'slept: no' conftest.file >/dev/null 2>&1; then + ( sleep 1 ) & + am_sleep_pid=$! +fi + +rm -f conftest.file + +test "$program_prefix" != NONE && + program_transform_name="s&^&$program_prefix&;$program_transform_name" +# Use a double $ so make ignores it. +test "$program_suffix" != NONE && + program_transform_name="s&\$&$program_suffix&;$program_transform_name" +# Double any \ or $. +# By default was `s,x,x', remove it if useless. +ac_script='s/[\\$]/&&/g;s/;s,x,x,$//' +program_transform_name=`printf "%s\n" "$program_transform_name" | sed "$ac_script"` + + +# Expand $ac_aux_dir to an absolute path. +am_aux_dir=`cd "$ac_aux_dir" && pwd` + + + if test x"${MISSING+set}" != xset; then + MISSING="\${SHELL} '$am_aux_dir/missing'" +fi +# Use eval to expand $SHELL +if eval "$MISSING --is-lightweight"; then + am_missing_run="$MISSING " +else + am_missing_run= + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: 'missing' script is too old or missing" >&5 +printf "%s\n" "$as_me: WARNING: 'missing' script is too old or missing" >&2;} +fi + +if test x"${install_sh+set}" != xset; then + case $am_aux_dir in + *\ * | *\ *) + install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; + *) + install_sh="\${SHELL} $am_aux_dir/install-sh" + esac +fi + +# Installed binaries are usually stripped using 'strip' when the user +# run "make install-strip". However 'strip' might not be the right +# tool to use in cross-compilation environments, therefore Automake +# will honor the 'STRIP' environment variable to overrule this program. +if test "$cross_compiling" != no; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. +set dummy ${ac_tool_prefix}strip; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_STRIP+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$STRIP"; then + ac_cv_prog_STRIP="$STRIP" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_STRIP="${ac_tool_prefix}strip" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +STRIP=$ac_cv_prog_STRIP +if test -n "$STRIP"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 +printf "%s\n" "$STRIP" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_STRIP"; then + ac_ct_STRIP=$STRIP + # Extract the first word of "strip", so it can be a program name with args. +set dummy strip; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_STRIP+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_STRIP"; then + ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_STRIP="strip" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP +if test -n "$ac_ct_STRIP"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 +printf "%s\n" "$ac_ct_STRIP" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + if test "x$ac_ct_STRIP" = x; then + STRIP=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + STRIP=$ac_ct_STRIP + fi +else + STRIP="$ac_cv_prog_STRIP" +fi + +fi +INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" + + + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for a race-free mkdir -p" >&5 +printf %s "checking for a race-free mkdir -p... " >&6; } +if test -z "$MKDIR_P"; then + if test ${ac_cv_path_mkdir+y} +then : + printf %s "(cached) " >&6 +else $as_nop + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_prog in mkdir gmkdir; do + for ac_exec_ext in '' $ac_executable_extensions; do + as_fn_executable_p "$as_dir$ac_prog$ac_exec_ext" || continue + case `"$as_dir$ac_prog$ac_exec_ext" --version 2>&1` in #( + 'mkdir ('*'coreutils) '* | \ + 'BusyBox '* | \ + 'mkdir (fileutils) '4.1*) + ac_cv_path_mkdir=$as_dir$ac_prog$ac_exec_ext + break 3;; + esac + done + done + done +IFS=$as_save_IFS + +fi + + test -d ./--version && rmdir ./--version + if test ${ac_cv_path_mkdir+y}; then + MKDIR_P="$ac_cv_path_mkdir -p" + else + # As a last resort, use the slow shell script. 
Don't cache a + # value for MKDIR_P within a source directory, because that will + # break other packages using the cache if that directory is + # removed, or if the value is a relative name. + MKDIR_P="$ac_install_sh -d" + fi +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MKDIR_P" >&5 +printf "%s\n" "$MKDIR_P" >&6; } + +for ac_prog in gawk mawk nawk awk +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_AWK+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$AWK"; then + ac_cv_prog_AWK="$AWK" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_AWK="$ac_prog" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +AWK=$ac_cv_prog_AWK +if test -n "$AWK"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 +printf "%s\n" "$AWK" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + + test -n "$AWK" && break +done + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5 +printf %s "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; } +set x ${MAKE-make} +ac_make=`printf "%s\n" "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` +if eval test \${ac_cv_prog_make_${ac_make}_set+y} +then : + printf %s "(cached) " >&6 +else $as_nop + cat >conftest.make <<\_ACEOF +SHELL = /bin/sh +all: + @echo '@@@%%%=$(MAKE)=@@@%%%' +_ACEOF +# GNU make sometimes prints "make[1]: Entering ...", which would confuse us. +case `${MAKE-make} -f conftest.make 2>/dev/null` in + *@@@%%%=?*=@@@%%%*) + eval ac_cv_prog_make_${ac_make}_set=yes;; + *) + eval ac_cv_prog_make_${ac_make}_set=no;; +esac +rm -f conftest.make +fi +if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +printf "%s\n" "yes" >&6; } + SET_MAKE= +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } + SET_MAKE="MAKE=${MAKE-make}" +fi + +rm -rf .tst 2>/dev/null +mkdir .tst 2>/dev/null +if test -d .tst; then + am__leading_dot=. +else + am__leading_dot=_ +fi +rmdir .tst 2>/dev/null + +# Check whether --enable-silent-rules was given. +if test ${enable_silent_rules+y} +then : + enableval=$enable_silent_rules; +fi + +case $enable_silent_rules in # ((( + yes) AM_DEFAULT_VERBOSITY=0;; + no) AM_DEFAULT_VERBOSITY=1;; + *) AM_DEFAULT_VERBOSITY=1;; +esac +am_make=${MAKE-make} +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $am_make supports nested variables" >&5 +printf %s "checking whether $am_make supports nested variables... 
" >&6; } +if test ${am_cv_make_support_nested_variables+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if printf "%s\n" 'TRUE=$(BAR$(V)) +BAR0=false +BAR1=true +V=1 +am__doit: + @$(TRUE) +.PHONY: am__doit' | $am_make -f - >/dev/null 2>&1; then + am_cv_make_support_nested_variables=yes +else + am_cv_make_support_nested_variables=no +fi +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_make_support_nested_variables" >&5 +printf "%s\n" "$am_cv_make_support_nested_variables" >&6; } +if test $am_cv_make_support_nested_variables = yes; then + AM_V='$(V)' + AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' +else + AM_V=$AM_DEFAULT_VERBOSITY + AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY +fi +AM_BACKSLASH='\' + +if test "`cd $srcdir && pwd`" != "`pwd`"; then + # Use -I$(srcdir) only when $(srcdir) != ., so that make's output + # is not polluted with repeated "-I." + am__isrc=' -I$(srcdir)' + # test to see if srcdir already configured + if test -f $srcdir/config.status; then + as_fn_error $? "source directory already configured; run \"make distclean\" there first" "$LINENO" 5 + fi +fi + +# test whether we have cygpath +if test -z "$CYGPATH_W"; then + if (cygpath --version) >/dev/null 2>/dev/null; then + CYGPATH_W='cygpath -w' + else + CYGPATH_W=echo + fi +fi + + +# Define the identity of the package. + PACKAGE='libenet' + VERSION='1.3.18' + + +printf "%s\n" "#define PACKAGE \"$PACKAGE\"" >>confdefs.h + + +printf "%s\n" "#define VERSION \"$VERSION\"" >>confdefs.h + +# Some tools Automake needs. + +ACLOCAL=${ACLOCAL-"${am_missing_run}aclocal-${am__api_version}"} + + +AUTOCONF=${AUTOCONF-"${am_missing_run}autoconf"} + + +AUTOMAKE=${AUTOMAKE-"${am_missing_run}automake-${am__api_version}"} + + +AUTOHEADER=${AUTOHEADER-"${am_missing_run}autoheader"} + + +MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"} + +# For better backward compatibility. To be removed once Automake 1.9.x +# dies out for good. For more background, see: +# +# +mkdir_p='$(MKDIR_P)' + +# We need awk for the "check" target (and possibly the TAP driver). The +# system "awk" is bad on some platforms. +# Always define AMTAR for backward compatibility. Yes, it's still used +# in the wild :-( We should find a proper way to deprecate it ... +AMTAR='$${TAR-tar}' + + +# We'll loop over all known methods to create a tar archive until one works. +_am_tools='gnutar pax cpio none' + +am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -' + + + + + +# Variables for tags utilities; see am/tags.am +if test -z "$CTAGS"; then + CTAGS=ctags +fi + +if test -z "$ETAGS"; then + ETAGS=etags +fi + +if test -z "$CSCOPE"; then + CSCOPE=cscope +fi + + + +# POSIX will say in a future version that running "rm -f" with no argument +# is OK; and we want to be able to make that assumption in our Makefile +# recipes. So use an aggressive probe to check that the usage we want is +# actually supported "in the wild" to an acceptable degree. +# See automake bug#10828. +# To make any issue more visible, cause the running configure to be aborted +# by default if the 'rm' program in use doesn't match our expectations; the +# user can still override this though. +if rm -f && rm -fr && rm -rf; then : OK; else + cat >&2 <<'END' +Oops! + +Your 'rm' program seems unable to run without file operands specified +on the command line, even when the '-f' option is present. 
This is contrary +to the behaviour of most rm programs out there, and not conforming with +the upcoming POSIX standard: + +Please tell bug-automake@gnu.org about your system, including the value +of your $PATH and any error possibly output before this message. This +can help us improve future automake versions. + +END + if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then + echo 'Configuration will proceed anyway, since you have set the' >&2 + echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2 + echo >&2 + else + cat >&2 <<'END' +Aborting the configuration process, to ensure you take notice of the issue. + +You can download and install GNU coreutils to get an 'rm' implementation +that behaves properly: . + +If you want to complete the configuration process using your problematic +'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM +to "yes", and re-run configure. + +END + as_fn_error $? "Your 'rm' program is bad, sorry." "$LINENO" 5 + fi +fi + + + + + + + + + + + + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. +set dummy ${ac_tool_prefix}gcc; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_CC+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="${ac_tool_prefix}gcc" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +printf "%s\n" "$CC" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_CC"; then + ac_ct_CC=$CC + # Extract the first word of "gcc", so it can be a program name with args. +set dummy gcc; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_CC+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CC="gcc" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +printf "%s\n" "$ac_ct_CC" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +else + CC="$ac_cv_prog_CC" +fi + +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. +set dummy ${ac_tool_prefix}cc; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_CC+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="${ac_tool_prefix}cc" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +printf "%s\n" "$CC" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + + fi +fi +if test -z "$CC"; then + # Extract the first word of "cc", so it can be a program name with args. +set dummy cc; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_CC+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else + ac_prog_rejected=no +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + if test "$as_dir$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then + ac_prog_rejected=yes + continue + fi + ac_cv_prog_CC="cc" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +if test $ac_prog_rejected = yes; then + # We found a bogon in the path, so make sure we never use it. + set dummy $ac_cv_prog_CC + shift + if test $# != 0; then + # We chose a different compiler from the bogus one. 
+ # However, it has the same basename, so the bogon will be chosen + # first if we set CC to just the basename; use the full file name. + shift + ac_cv_prog_CC="$as_dir$ac_word${1+' '}$@" + fi +fi +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +printf "%s\n" "$CC" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + +fi +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + for ac_prog in cl.exe + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_CC+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="$ac_tool_prefix$ac_prog" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +printf "%s\n" "$CC" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + + test -n "$CC" && break + done +fi +if test -z "$CC"; then + ac_ct_CC=$CC + for ac_prog in cl.exe +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_CC+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CC="$ac_prog" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +printf "%s\n" "$ac_ct_CC" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + + test -n "$ac_ct_CC" && break +done + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +fi + +fi +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}clang", so it can be a program name with args. 
+set dummy ${ac_tool_prefix}clang; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_CC+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="${ac_tool_prefix}clang" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +printf "%s\n" "$CC" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_CC"; then + ac_ct_CC=$CC + # Extract the first word of "clang", so it can be a program name with args. +set dummy clang; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_CC+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CC="clang" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +printf "%s\n" "$ac_ct_CC" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +else + CC="$ac_cv_prog_CC" +fi + +fi + + +test -z "$CC" && { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "no acceptable C compiler found in \$PATH +See \`config.log' for more details" "$LINENO" 5; } + +# Provide some information about the compiler. +printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 +set X $ac_compile +ac_compiler=$2 +for ac_option in --version -v -V -qversion -version; do + { { ac_try="$ac_compiler $ac_option >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +printf "%s\n" "$ac_try_echo"; } >&5 + (eval "$ac_compiler $ac_option >&5") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + sed '10a\ +... rest of stderr output deleted ... 
+ 10q' conftest.err >conftest.er1 + cat conftest.er1 >&5 + fi + rm -f conftest.er1 conftest.err + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } +done + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main (void) +{ + + ; + return 0; +} +_ACEOF +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" +# Try to create an executable without -o first, disregard a.out. +# It will help us diagnose broken compilers, and finding out an intuition +# of exeext. +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 +printf %s "checking whether the C compiler works... " >&6; } +ac_link_default=`printf "%s\n" "$ac_link" | sed 's/ -o *conftest[^ ]*//'` + +# The possible output files: +ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" + +ac_rmfiles= +for ac_file in $ac_files +do + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; + * ) ac_rmfiles="$ac_rmfiles $ac_file";; + esac +done +rm -f $ac_rmfiles + +if { { ac_try="$ac_link_default" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +printf "%s\n" "$ac_try_echo"; } >&5 + (eval "$ac_link_default") 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } +then : + # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. +# So ignore a value of `no', otherwise this would lead to `EXEEXT = no' +# in a Makefile. We should not override ac_cv_exeext if it was cached, +# so that the user can short-circuit this test for compilers unknown to +# Autoconf. +for ac_file in $ac_files '' +do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) + ;; + [ab].out ) + # We found the default executable, but exeext='' is most + # certainly right. + break;; + *.* ) + if test ${ac_cv_exeext+y} && test "$ac_cv_exeext" != no; + then :; else + ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + fi + # We set ac_cv_exeext here because the later test for it is not + # safe: cross compilers may not add the suffix if given an `-o' + # argument, so we may need to know it at that point already. + # Even if this section looks crufty: it has the advantage of + # actually working. + break;; + * ) + break;; + esac +done +test "$ac_cv_exeext" = no && ac_cv_exeext= + +else $as_nop + ac_file='' +fi +if test -z "$ac_file" +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +printf "%s\n" "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error 77 "C compiler cannot create executables +See \`config.log' for more details" "$LINENO" 5; } +else $as_nop + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +printf "%s\n" "yes" >&6; } +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 +printf %s "checking for C compiler default output file name... 
" >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 +printf "%s\n" "$ac_file" >&6; } +ac_exeext=$ac_cv_exeext + +rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out +ac_clean_files=$ac_clean_files_save +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 +printf %s "checking for suffix of executables... " >&6; } +if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +printf "%s\n" "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } +then : + # If both `conftest.exe' and `conftest' are `present' (well, observable) +# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will +# work properly (i.e., refer to `conftest.exe'), while it won't with +# `rm'. +for ac_file in conftest.exe conftest conftest.*; do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; + *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + break;; + * ) break;; + esac +done +else $as_nop + { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot compute suffix of executables: cannot compile and link +See \`config.log' for more details" "$LINENO" 5; } +fi +rm -f conftest conftest$ac_cv_exeext +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 +printf "%s\n" "$ac_cv_exeext" >&6; } + +rm -f conftest.$ac_ext +EXEEXT=$ac_cv_exeext +ac_exeext=$EXEEXT +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main (void) +{ +FILE *f = fopen ("conftest.out", "w"); + return ferror (f) || fclose (f) != 0; + + ; + return 0; +} +_ACEOF +ac_clean_files="$ac_clean_files conftest.out" +# Check that the compiler produces executables we can run. If not, either +# the compiler is broken, or we cross compile. +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 +printf %s "checking whether we are cross compiling... " >&6; } +if test "$cross_compiling" != yes; then + { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +printf "%s\n" "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + if { ac_try='./conftest$ac_cv_exeext' + { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +printf "%s\n" "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; }; then + cross_compiling=no + else + if test "$cross_compiling" = maybe; then + cross_compiling=yes + else + { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error 77 "cannot run C compiled programs. +If you meant to cross compile, use \`--host'. 
+See \`config.log' for more details" "$LINENO" 5; } + fi + fi +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 +printf "%s\n" "$cross_compiling" >&6; } + +rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out +ac_clean_files=$ac_clean_files_save +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 +printf %s "checking for suffix of object files... " >&6; } +if test ${ac_cv_objext+y} +then : + printf %s "(cached) " >&6 +else $as_nop + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main (void) +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.o conftest.obj +if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +printf "%s\n" "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } +then : + for ac_file in conftest.o conftest.obj conftest.*; do + test -f "$ac_file" || continue; + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; + *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` + break;; + esac +done +else $as_nop + printf "%s\n" "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot compute suffix of object files: cannot compile +See \`config.log' for more details" "$LINENO" 5; } +fi +rm -f conftest.$ac_cv_objext conftest.$ac_ext +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 +printf "%s\n" "$ac_cv_objext" >&6; } +OBJEXT=$ac_cv_objext +ac_objext=$OBJEXT +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the compiler supports GNU C" >&5 +printf %s "checking whether the compiler supports GNU C... " >&6; } +if test ${ac_cv_c_compiler_gnu+y} +then : + printf %s "(cached) " >&6 +else $as_nop + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main (void) +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO" +then : + ac_compiler_gnu=yes +else $as_nop + ac_compiler_gnu=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext +ac_cv_c_compiler_gnu=$ac_compiler_gnu + +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 +printf "%s\n" "$ac_cv_c_compiler_gnu" >&6; } +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +if test $ac_compiler_gnu = yes; then + GCC=yes +else + GCC= +fi +ac_test_CFLAGS=${CFLAGS+y} +ac_save_CFLAGS=$CFLAGS +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 +printf %s "checking whether $CC accepts -g... " >&6; } +if test ${ac_cv_prog_cc_g+y} +then : + printf %s "(cached) " >&6 +else $as_nop + ac_save_c_werror_flag=$ac_c_werror_flag + ac_c_werror_flag=yes + ac_cv_prog_cc_g=no + CFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main (void) +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO" +then : + ac_cv_prog_cc_g=yes +else $as_nop + CFLAGS="" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main (void) +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO" +then : + +else $as_nop + ac_c_werror_flag=$ac_save_c_werror_flag + CFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main (void) +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO" +then : + ac_cv_prog_cc_g=yes +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext + ac_c_werror_flag=$ac_save_c_werror_flag +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 +printf "%s\n" "$ac_cv_prog_cc_g" >&6; } +if test $ac_test_CFLAGS; then + CFLAGS=$ac_save_CFLAGS +elif test $ac_cv_prog_cc_g = yes; then + if test "$GCC" = yes; then + CFLAGS="-g -O2" + else + CFLAGS="-g" + fi +else + if test "$GCC" = yes; then + CFLAGS="-O2" + else + CFLAGS= + fi +fi +ac_prog_cc_stdc=no +if test x$ac_prog_cc_stdc = xno +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C11 features" >&5 +printf %s "checking for $CC option to enable C11 features... " >&6; } +if test ${ac_cv_prog_cc_c11+y} +then : + printf %s "(cached) " >&6 +else $as_nop + ac_cv_prog_cc_c11=no +ac_save_CC=$CC +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$ac_c_conftest_c11_program +_ACEOF +for ac_arg in '' -std=gnu11 +do + CC="$ac_save_CC $ac_arg" + if ac_fn_c_try_compile "$LINENO" +then : + ac_cv_prog_cc_c11=$ac_arg +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam + test "x$ac_cv_prog_cc_c11" != "xno" && break +done +rm -f conftest.$ac_ext +CC=$ac_save_CC +fi + +if test "x$ac_cv_prog_cc_c11" = xno +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +printf "%s\n" "unsupported" >&6; } +else $as_nop + if test "x$ac_cv_prog_cc_c11" = x +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +printf "%s\n" "none needed" >&6; } +else $as_nop + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c11" >&5 +printf "%s\n" "$ac_cv_prog_cc_c11" >&6; } + CC="$CC $ac_cv_prog_cc_c11" +fi + ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c11 + ac_prog_cc_stdc=c11 +fi +fi +if test x$ac_prog_cc_stdc = xno +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C99 features" >&5 +printf %s "checking for $CC option to enable C99 features... " >&6; } +if test ${ac_cv_prog_cc_c99+y} +then : + printf %s "(cached) " >&6 +else $as_nop + ac_cv_prog_cc_c99=no +ac_save_CC=$CC +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +$ac_c_conftest_c99_program +_ACEOF +for ac_arg in '' -std=gnu99 -std=c99 -c99 -qlanglvl=extc1x -qlanglvl=extc99 -AC99 -D_STDC_C99= +do + CC="$ac_save_CC $ac_arg" + if ac_fn_c_try_compile "$LINENO" +then : + ac_cv_prog_cc_c99=$ac_arg +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam + test "x$ac_cv_prog_cc_c99" != "xno" && break +done +rm -f conftest.$ac_ext +CC=$ac_save_CC +fi + +if test "x$ac_cv_prog_cc_c99" = xno +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +printf "%s\n" "unsupported" >&6; } +else $as_nop + if test "x$ac_cv_prog_cc_c99" = x +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +printf "%s\n" "none needed" >&6; } +else $as_nop + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c99" >&5 +printf "%s\n" "$ac_cv_prog_cc_c99" >&6; } + CC="$CC $ac_cv_prog_cc_c99" +fi + ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c99 + ac_prog_cc_stdc=c99 +fi +fi +if test x$ac_prog_cc_stdc = xno +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C89 features" >&5 +printf %s "checking for $CC option to enable C89 features... " >&6; } +if test ${ac_cv_prog_cc_c89+y} +then : + printf %s "(cached) " >&6 +else $as_nop + ac_cv_prog_cc_c89=no +ac_save_CC=$CC +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$ac_c_conftest_c89_program +_ACEOF +for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" +do + CC="$ac_save_CC $ac_arg" + if ac_fn_c_try_compile "$LINENO" +then : + ac_cv_prog_cc_c89=$ac_arg +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam + test "x$ac_cv_prog_cc_c89" != "xno" && break +done +rm -f conftest.$ac_ext +CC=$ac_save_CC +fi + +if test "x$ac_cv_prog_cc_c89" = xno +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +printf "%s\n" "unsupported" >&6; } +else $as_nop + if test "x$ac_cv_prog_cc_c89" = x +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +printf "%s\n" "none needed" >&6; } +else $as_nop + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 +printf "%s\n" "$ac_cv_prog_cc_c89" >&6; } + CC="$CC $ac_cv_prog_cc_c89" +fi + ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c89 + ac_prog_cc_stdc=c89 +fi +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CC understands -c and -o together" >&5 +printf %s "checking whether $CC understands -c and -o together... " >&6; } +if test ${am_cv_prog_cc_c_o+y} +then : + printf %s "(cached) " >&6 +else $as_nop + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main (void) +{ + + ; + return 0; +} +_ACEOF + # Make sure it works both with $CC and with simple cc. + # Following AC_PROG_CC_C_O, we do the test twice because some + # compilers refuse to overwrite an existing .o file with -o, + # though they will create one. 
+ am_cv_prog_cc_c_o=yes + for am_i in 1 2; do + if { echo "$as_me:$LINENO: $CC -c conftest.$ac_ext -o conftest2.$ac_objext" >&5 + ($CC -c conftest.$ac_ext -o conftest2.$ac_objext) >&5 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } \ + && test -f conftest2.$ac_objext; then + : OK + else + am_cv_prog_cc_c_o=no + break + fi + done + rm -f core conftest* + unset am_i +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_cc_c_o" >&5 +printf "%s\n" "$am_cv_prog_cc_c_o" >&6; } +if test "$am_cv_prog_cc_c_o" != yes; then + # Losing compiler, so override with the script. + # FIXME: It is wrong to rewrite CC. + # But if we don't then we get into trouble of one sort or another. + # A longer-term fix would be to have automake use am__CC in this case, + # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)" + CC="$am_aux_dir/compile $CC" +fi +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +DEPDIR="${am__leading_dot}deps" + +ac_config_commands="$ac_config_commands depfiles" + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} supports the include directive" >&5 +printf %s "checking whether ${MAKE-make} supports the include directive... " >&6; } +cat > confinc.mk << 'END' +am__doit: + @echo this is the am__doit target >confinc.out +.PHONY: am__doit +END +am__include="#" +am__quote= +# BSD make does it like this. +echo '.include "confinc.mk" # ignored' > confmf.BSD +# Other make implementations (GNU, Solaris 10, AIX) do it like this. +echo 'include confinc.mk # ignored' > confmf.GNU +_am_result=no +for s in GNU BSD; do + { echo "$as_me:$LINENO: ${MAKE-make} -f confmf.$s && cat confinc.out" >&5 + (${MAKE-make} -f confmf.$s && cat confinc.out) >&5 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } + case $?:`cat confinc.out 2>/dev/null` in #( + '0:this is the am__doit target') : + case $s in #( + BSD) : + am__include='.include' am__quote='"' ;; #( + *) : + am__include='include' am__quote='' ;; +esac ;; #( + *) : + ;; +esac + if test "$am__include" != "#"; then + _am_result="yes ($s style)" + break + fi +done +rm -f confinc.* confmf.* +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: ${_am_result}" >&5 +printf "%s\n" "${_am_result}" >&6; } + +# Check whether --enable-dependency-tracking was given. +if test ${enable_dependency_tracking+y} +then : + enableval=$enable_dependency_tracking; +fi + +if test "x$enable_dependency_tracking" != xno; then + am_depcomp="$ac_aux_dir/depcomp" + AMDEPBACKSLASH='\' + am__nodep='_no' +fi + if test "x$enable_dependency_tracking" != xno; then + AMDEP_TRUE= + AMDEP_FALSE='#' +else + AMDEP_TRUE='#' + AMDEP_FALSE= +fi + + + +depcc="$CC" am_compiler_list= + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 +printf %s "checking dependency style of $depcc... " >&6; } +if test ${am_cv_CC_dependencies_compiler_type+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then + # We make a subdir and do the tests there. Otherwise we can end up + # making bogus files that we don't know about and never remove. For + # instance it was reported that on HP-UX the gcc test will end up + # making a dummy file named 'D' -- because '-MD' means "put the output + # in D". 
+ rm -rf conftest.dir + mkdir conftest.dir + # Copy depcomp to subdir because otherwise we won't find it if we're + # using a relative directory. + cp "$am_depcomp" conftest.dir + cd conftest.dir + # We will build objects and dependencies in a subdirectory because + # it helps to detect inapplicable dependency modes. For instance + # both Tru64's cc and ICC support -MD to output dependencies as a + # side effect of compilation, but ICC will put the dependencies in + # the current directory while Tru64 will put them in the object + # directory. + mkdir sub + + am_cv_CC_dependencies_compiler_type=none + if test "$am_compiler_list" = ""; then + am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` + fi + am__universal=false + case " $depcc " in #( + *\ -arch\ *\ -arch\ *) am__universal=true ;; + esac + + for depmode in $am_compiler_list; do + # Setup a source with many dependencies, because some compilers + # like to wrap large dependency lists on column 80 (with \), and + # we should not choose a depcomp mode which is confused by this. + # + # We need to recreate these files for each test, as the compiler may + # overwrite some of them when testing with obscure command lines. + # This happens at least with the AIX C compiler. + : > sub/conftest.c + for i in 1 2 3 4 5 6; do + echo '#include "conftst'$i'.h"' >> sub/conftest.c + # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with + # Solaris 10 /bin/sh. + echo '/* dummy */' > sub/conftst$i.h + done + echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf + + # We check with '-c' and '-o' for the sake of the "dashmstdout" + # mode. It turns out that the SunPro C++ compiler does not properly + # handle '-M -o', and we need to detect this. Also, some Intel + # versions had trouble with output in subdirs. + am__obj=sub/conftest.${OBJEXT-o} + am__minus_obj="-o $am__obj" + case $depmode in + gcc) + # This depmode causes a compiler race in universal mode. + test "$am__universal" = false || continue + ;; + nosideeffect) + # After this tag, mechanisms are not by side-effect, so they'll + # only be used when explicitly requested. + if test "x$enable_dependency_tracking" = xyes; then + continue + else + break + fi + ;; + msvc7 | msvc7msys | msvisualcpp | msvcmsys) + # This compiler won't grok '-c -o', but also, the minuso test has + # not run yet. These depmodes are late enough in the game, and + # so weak that their functioning should not be impacted. + am__obj=conftest.${OBJEXT-o} + am__minus_obj= + ;; + none) break ;; + esac + if depmode=$depmode \ + source=sub/conftest.c object=$am__obj \ + depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ + $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ + >/dev/null 2>conftest.err && + grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && + grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && + grep $am__obj sub/conftest.Po > /dev/null 2>&1 && + ${MAKE-make} -s -f confmf > /dev/null 2>&1; then + # icc doesn't choke on unknown options, it will just issue warnings + # or remarks (even with -Werror). So we grep stderr for any message + # that says an option was ignored or not supported. 
+ # When given -MP, icc 7.0 and 7.1 complain thusly: + # icc: Command line warning: ignoring option '-M'; no argument required + # The diagnosis changed in icc 8.0: + # icc: Command line remark: option '-MP' not supported + if (grep 'ignoring option' conftest.err || + grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else + am_cv_CC_dependencies_compiler_type=$depmode + break + fi + fi + done + + cd .. + rm -rf conftest.dir +else + am_cv_CC_dependencies_compiler_type=none +fi + +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5 +printf "%s\n" "$am_cv_CC_dependencies_compiler_type" >&6; } +CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type + + if + test "x$enable_dependency_tracking" != xno \ + && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then + am__fastdepCC_TRUE= + am__fastdepCC_FALSE='#' +else + am__fastdepCC_TRUE='#' + am__fastdepCC_FALSE= +fi + + +case `pwd` in + *\ * | *\ *) + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&5 +printf "%s\n" "$as_me: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&2;} ;; +esac + + + +macro_version='2.4.7' +macro_revision='2.4.7' + + + + + + + + + + + + + + +ltmain=$ac_aux_dir/ltmain.sh + + + + # Make sure we can run config.sub. +$SHELL "${ac_aux_dir}config.sub" sun4 >/dev/null 2>&1 || + as_fn_error $? "cannot run $SHELL ${ac_aux_dir}config.sub" "$LINENO" 5 + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 +printf %s "checking build system type... " >&6; } +if test ${ac_cv_build+y} +then : + printf %s "(cached) " >&6 +else $as_nop + ac_build_alias=$build_alias +test "x$ac_build_alias" = x && + ac_build_alias=`$SHELL "${ac_aux_dir}config.guess"` +test "x$ac_build_alias" = x && + as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5 +ac_cv_build=`$SHELL "${ac_aux_dir}config.sub" $ac_build_alias` || + as_fn_error $? "$SHELL ${ac_aux_dir}config.sub $ac_build_alias failed" "$LINENO" 5 + +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 +printf "%s\n" "$ac_cv_build" >&6; } +case $ac_cv_build in +*-*-*) ;; +*) as_fn_error $? "invalid value of canonical build" "$LINENO" 5;; +esac +build=$ac_cv_build +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_build +shift +build_cpu=$1 +build_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +build_os=$* +IFS=$ac_save_IFS +case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac + + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 +printf %s "checking host system type... " >&6; } +if test ${ac_cv_host+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test "x$host_alias" = x; then + ac_cv_host=$ac_cv_build +else + ac_cv_host=`$SHELL "${ac_aux_dir}config.sub" $host_alias` || + as_fn_error $? "$SHELL ${ac_aux_dir}config.sub $host_alias failed" "$LINENO" 5 +fi + +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 +printf "%s\n" "$ac_cv_host" >&6; } +case $ac_cv_host in +*-*-*) ;; +*) as_fn_error $? 
"invalid value of canonical host" "$LINENO" 5;; +esac +host=$ac_cv_host +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_host +shift +host_cpu=$1 +host_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +host_os=$* +IFS=$ac_save_IFS +case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac + + +# Backslashify metacharacters that are still active within +# double-quoted strings. +sed_quote_subst='s/\(["`$\\]\)/\\\1/g' + +# Same as above, but do not quote variable references. +double_quote_subst='s/\(["`\\]\)/\\\1/g' + +# Sed substitution to delay expansion of an escaped shell variable in a +# double_quote_subst'ed string. +delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' + +# Sed substitution to delay expansion of an escaped single quote. +delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' + +# Sed substitution to avoid accidental globbing in evaled expressions +no_glob_subst='s/\*/\\\*/g' + +ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO +ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 +printf %s "checking how to print strings... " >&6; } +# Test print first, because it will be a builtin if present. +if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ + test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='print -r --' +elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='printf %s\n' +else + # Use this function as a fallback that always works. + func_fallback_echo () + { + eval 'cat <<_LTECHO_EOF +$1 +_LTECHO_EOF' + } + ECHO='func_fallback_echo' +fi + +# func_echo_all arg... +# Invoke $ECHO with all args, space-separated. +func_echo_all () +{ + $ECHO "" +} + +case $ECHO in + printf*) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: printf" >&5 +printf "%s\n" "printf" >&6; } ;; + print*) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: print -r" >&5 +printf "%s\n" "print -r" >&6; } ;; + *) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: cat" >&5 +printf "%s\n" "cat" >&6; } ;; +esac + + + + + + + + + + + + + + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for a sed that does not truncate output" >&5 +printf %s "checking for a sed that does not truncate output... " >&6; } +if test ${ac_cv_path_SED+y} +then : + printf %s "(cached) " >&6 +else $as_nop + ac_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ + for ac_i in 1 2 3 4 5 6 7; do + ac_script="$ac_script$as_nl$ac_script" + done + echo "$ac_script" 2>/dev/null | sed 99q >conftest.sed + { ac_script=; unset ac_script;} + if test -z "$SED"; then + ac_path_SED_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_prog in sed gsed + do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_SED="$as_dir$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_SED" || continue +# Check for GNU ac_path_SED and select it if it is found. 
+ # Check for GNU $ac_path_SED +case `"$ac_path_SED" --version 2>&1` in +*GNU*) + ac_cv_path_SED="$ac_path_SED" ac_path_SED_found=:;; +*) + ac_count=0 + printf %s 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + printf "%s\n" '' >> "conftest.nl" + "$ac_path_SED" -f conftest.sed < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_SED_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_SED="$ac_path_SED" + ac_path_SED_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_SED_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_SED"; then + as_fn_error $? "no acceptable sed could be found in \$PATH" "$LINENO" 5 + fi +else + ac_cv_path_SED=$SED +fi + +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_SED" >&5 +printf "%s\n" "$ac_cv_path_SED" >&6; } + SED="$ac_cv_path_SED" + rm -f conftest.sed + +test -z "$SED" && SED=sed +Xsed="$SED -e 1s/^X//" + + + + + + + + + + + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 +printf %s "checking for grep that handles long lines and -e... " >&6; } +if test ${ac_cv_path_GREP+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -z "$GREP"; then + ac_path_GREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_prog in grep ggrep + do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_GREP="$as_dir$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_GREP" || continue +# Check for GNU ac_path_GREP and select it if it is found. + # Check for GNU $ac_path_GREP +case `"$ac_path_GREP" --version 2>&1` in +*GNU*) + ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; +*) + ac_count=0 + printf %s 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + printf "%s\n" 'GREP' >> "conftest.nl" + "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_GREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_GREP="$ac_path_GREP" + ac_path_GREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_GREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_GREP"; then + as_fn_error $? 
"no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_GREP=$GREP +fi + +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 +printf "%s\n" "$ac_cv_path_GREP" >&6; } + GREP="$ac_cv_path_GREP" + + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 +printf %s "checking for egrep... " >&6; } +if test ${ac_cv_path_EGREP+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 + then ac_cv_path_EGREP="$GREP -E" + else + if test -z "$EGREP"; then + ac_path_EGREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_prog in egrep + do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_EGREP="$as_dir$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_EGREP" || continue +# Check for GNU ac_path_EGREP and select it if it is found. + # Check for GNU $ac_path_EGREP +case `"$ac_path_EGREP" --version 2>&1` in +*GNU*) + ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; +*) + ac_count=0 + printf %s 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + printf "%s\n" 'EGREP' >> "conftest.nl" + "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_EGREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_EGREP="$ac_path_EGREP" + ac_path_EGREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_EGREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_EGREP"; then + as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_EGREP=$EGREP +fi + + fi +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 +printf "%s\n" "$ac_cv_path_EGREP" >&6; } + EGREP="$ac_cv_path_EGREP" + + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for fgrep" >&5 +printf %s "checking for fgrep... " >&6; } +if test ${ac_cv_path_FGREP+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if echo 'ab*c' | $GREP -F 'ab*c' >/dev/null 2>&1 + then ac_cv_path_FGREP="$GREP -F" + else + if test -z "$FGREP"; then + ac_path_FGREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_prog in fgrep + do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_FGREP="$as_dir$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_FGREP" || continue +# Check for GNU ac_path_FGREP and select it if it is found. 
+ # Check for GNU $ac_path_FGREP +case `"$ac_path_FGREP" --version 2>&1` in +*GNU*) + ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_found=:;; +*) + ac_count=0 + printf %s 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + printf "%s\n" 'FGREP' >> "conftest.nl" + "$ac_path_FGREP" FGREP < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_FGREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_FGREP="$ac_path_FGREP" + ac_path_FGREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_FGREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_FGREP"; then + as_fn_error $? "no acceptable fgrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_FGREP=$FGREP +fi + + fi +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_FGREP" >&5 +printf "%s\n" "$ac_cv_path_FGREP" >&6; } + FGREP="$ac_cv_path_FGREP" + + +test -z "$GREP" && GREP=grep + + + + + + + + + + + + + + + + + + + +# Check whether --with-gnu-ld was given. +if test ${with_gnu_ld+y} +then : + withval=$with_gnu_ld; test no = "$withval" || with_gnu_ld=yes +else $as_nop + with_gnu_ld=no +fi + +ac_prog=ld +if test yes = "$GCC"; then + # Check if gcc -print-prog-name=ld gives a path. + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5 +printf %s "checking for ld used by $CC... " >&6; } + case $host in + *-*-mingw*) + # gcc leaves a trailing carriage return, which upsets mingw + ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; + *) + ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; + esac + case $ac_prog in + # Accept absolute paths. + [\\/]* | ?:[\\/]*) + re_direlt='/[^/][^/]*/\.\./' + # Canonicalize the pathname of ld + ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` + while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do + ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` + done + test -z "$LD" && LD=$ac_prog + ;; + "") + # If it fails, then pretend we aren't using GCC. + ac_prog=ld + ;; + *) + # If it is relative, then search for the first ld in PATH. + with_gnu_ld=unknown + ;; + esac +elif test yes = "$with_gnu_ld"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 +printf %s "checking for GNU ld... " >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 +printf %s "checking for non-GNU ld... " >&6; } +fi +if test ${lt_cv_path_LD+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -z "$LD"; then + lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR + for ac_dir in $PATH; do + IFS=$lt_save_ifs + test -z "$ac_dir" && ac_dir=. + if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then + lt_cv_path_LD=$ac_dir/$ac_prog + # Check to see if the program is GNU ld. I'd rather use --version, + # but apparently some variants of GNU ld only accept -v. + # Break only if it was the GNU/non-GNU ld that we prefer. + case `"$lt_cv_path_LD" -v 2>&1 &5 +printf "%s\n" "$LD" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi +test -z "$LD" && as_fn_error $? 
"no acceptable ld found in \$PATH" "$LINENO" 5 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 +printf %s "checking if the linker ($LD) is GNU ld... " >&6; } +if test ${lt_cv_prog_gnu_ld+y} +then : + printf %s "(cached) " >&6 +else $as_nop + # I'd rather use --version here, but apparently some GNU lds only accept -v. +case `$LD -v 2>&1 &5 +printf "%s\n" "$lt_cv_prog_gnu_ld" >&6; } +with_gnu_ld=$lt_cv_prog_gnu_ld + + + + + + + + + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for BSD- or MS-compatible name lister (nm)" >&5 +printf %s "checking for BSD- or MS-compatible name lister (nm)... " >&6; } +if test ${lt_cv_path_NM+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$NM"; then + # Let the user override the test. + lt_cv_path_NM=$NM +else + lt_nm_to_check=${ac_tool_prefix}nm + if test -n "$ac_tool_prefix" && test "$build" = "$host"; then + lt_nm_to_check="$lt_nm_to_check nm" + fi + for lt_tmp_nm in $lt_nm_to_check; do + lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR + for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do + IFS=$lt_save_ifs + test -z "$ac_dir" && ac_dir=. + tmp_nm=$ac_dir/$lt_tmp_nm + if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext"; then + # Check to see if the nm accepts a BSD-compat flag. + # Adding the 'sed 1q' prevents false positives on HP-UX, which says: + # nm: unknown option "B" ignored + # Tru64's nm complains that /dev/null is an invalid object file + # MSYS converts /dev/null to NUL, MinGW nm treats NUL as empty + case $build_os in + mingw*) lt_bad_file=conftest.nm/nofile ;; + *) lt_bad_file=/dev/null ;; + esac + case `"$tmp_nm" -B $lt_bad_file 2>&1 | $SED '1q'` in + *$lt_bad_file* | *'Invalid file or object type'*) + lt_cv_path_NM="$tmp_nm -B" + break 2 + ;; + *) + case `"$tmp_nm" -p /dev/null 2>&1 | $SED '1q'` in + */dev/null*) + lt_cv_path_NM="$tmp_nm -p" + break 2 + ;; + *) + lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but + continue # so that we can try to find one that supports BSD flags + ;; + esac + ;; + esac + fi + done + IFS=$lt_save_ifs + done + : ${lt_cv_path_NM=no} +fi +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_NM" >&5 +printf "%s\n" "$lt_cv_path_NM" >&6; } +if test no != "$lt_cv_path_NM"; then + NM=$lt_cv_path_NM +else + # Didn't find any BSD compatible name lister, look for dumpbin. + if test -n "$DUMPBIN"; then : + # Let the user override the test. + else + if test -n "$ac_tool_prefix"; then + for ac_prog in dumpbin "link -dump" + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_DUMPBIN+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$DUMPBIN"; then + ac_cv_prog_DUMPBIN="$DUMPBIN" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_DUMPBIN="$ac_tool_prefix$ac_prog" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +DUMPBIN=$ac_cv_prog_DUMPBIN +if test -n "$DUMPBIN"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $DUMPBIN" >&5 +printf "%s\n" "$DUMPBIN" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + + test -n "$DUMPBIN" && break + done +fi +if test -z "$DUMPBIN"; then + ac_ct_DUMPBIN=$DUMPBIN + for ac_prog in dumpbin "link -dump" +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_DUMPBIN+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_DUMPBIN"; then + ac_cv_prog_ac_ct_DUMPBIN="$ac_ct_DUMPBIN" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_DUMPBIN="$ac_prog" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_DUMPBIN=$ac_cv_prog_ac_ct_DUMPBIN +if test -n "$ac_ct_DUMPBIN"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DUMPBIN" >&5 +printf "%s\n" "$ac_ct_DUMPBIN" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + + test -n "$ac_ct_DUMPBIN" && break +done + + if test "x$ac_ct_DUMPBIN" = x; then + DUMPBIN=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + DUMPBIN=$ac_ct_DUMPBIN + fi +fi + + case `$DUMPBIN -symbols -headers /dev/null 2>&1 | $SED '1q'` in + *COFF*) + DUMPBIN="$DUMPBIN -symbols -headers" + ;; + *) + DUMPBIN=: + ;; + esac + fi + + if test : != "$DUMPBIN"; then + NM=$DUMPBIN + fi +fi +test -z "$NM" && NM=nm + + + + + + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking the name lister ($NM) interface" >&5 +printf %s "checking the name lister ($NM) interface... 
" >&6; } +if test ${lt_cv_nm_interface+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_nm_interface="BSD nm" + echo "int some_variable = 0;" > conftest.$ac_ext + (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&5) + (eval "$ac_compile" 2>conftest.err) + cat conftest.err >&5 + (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&5) + (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) + cat conftest.err >&5 + (eval echo "\"\$as_me:$LINENO: output\"" >&5) + cat conftest.out >&5 + if $GREP 'External.*some_variable' conftest.out > /dev/null; then + lt_cv_nm_interface="MS dumpbin" + fi + rm -f conftest* +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_nm_interface" >&5 +printf "%s\n" "$lt_cv_nm_interface" >&6; } + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether ln -s works" >&5 +printf %s "checking whether ln -s works... " >&6; } +LN_S=$as_ln_s +if test "$LN_S" = "ln -s"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +printf "%s\n" "yes" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no, using $LN_S" >&5 +printf "%s\n" "no, using $LN_S" >&6; } +fi + +# find the maximum length of command line arguments +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking the maximum length of command line arguments" >&5 +printf %s "checking the maximum length of command line arguments... " >&6; } +if test ${lt_cv_sys_max_cmd_len+y} +then : + printf %s "(cached) " >&6 +else $as_nop + i=0 + teststring=ABCD + + case $build_os in + msdosdjgpp*) + # On DJGPP, this test can blow up pretty badly due to problems in libc + # (any single argument exceeding 2000 bytes causes a buffer overrun + # during glob expansion). Even if it were fixed, the result of this + # check would be larger than it should be. + lt_cv_sys_max_cmd_len=12288; # 12K is about right + ;; + + gnu*) + # Under GNU Hurd, this test is not required because there is + # no limit to the length of command line arguments. + # Libtool will interpret -1 as no limit whatsoever + lt_cv_sys_max_cmd_len=-1; + ;; + + cygwin* | mingw* | cegcc*) + # On Win9x/ME, this test blows up -- it succeeds, but takes + # about 5 minutes as the teststring grows exponentially. + # Worse, since 9x/ME are not pre-emptively multitasking, + # you end up with a "frozen" computer, even though with patience + # the test eventually succeeds (with a max line length of 256k). + # Instead, let's just punt: use the minimum linelength reported by + # all of the supported platforms: 8192 (on NT/2K/XP). + lt_cv_sys_max_cmd_len=8192; + ;; + + mint*) + # On MiNT this can take a long time and run out of memory. + lt_cv_sys_max_cmd_len=8192; + ;; + + amigaos*) + # On AmigaOS with pdksh, this test takes hours, literally. + # So we just punt and use a minimum line length of 8192. + lt_cv_sys_max_cmd_len=8192; + ;; + + bitrig* | darwin* | dragonfly* | freebsd* | midnightbsd* | netbsd* | openbsd*) + # This has been around since 386BSD, at least. Likely further. 
+ if test -x /sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` + elif test -x /usr/sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` + else + lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs + fi + # And add a safety zone + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + ;; + + interix*) + # We know the value 262144 and hardcode it with a safety zone (like BSD) + lt_cv_sys_max_cmd_len=196608 + ;; + + os2*) + # The test takes a long time on OS/2. + lt_cv_sys_max_cmd_len=8192 + ;; + + osf*) + # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure + # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not + # nice to cause kernel panics so lets avoid the loop below. + # First set a reasonable default. + lt_cv_sys_max_cmd_len=16384 + # + if test -x /sbin/sysconfig; then + case `/sbin/sysconfig -q proc exec_disable_arg_limit` in + *1*) lt_cv_sys_max_cmd_len=-1 ;; + esac + fi + ;; + sco3.2v5*) + lt_cv_sys_max_cmd_len=102400 + ;; + sysv5* | sco5v6* | sysv4.2uw2*) + kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` + if test -n "$kargmax"; then + lt_cv_sys_max_cmd_len=`echo $kargmax | $SED 's/.*[ ]//'` + else + lt_cv_sys_max_cmd_len=32768 + fi + ;; + *) + lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` + if test -n "$lt_cv_sys_max_cmd_len" && \ + test undefined != "$lt_cv_sys_max_cmd_len"; then + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + else + # Make teststring a little bigger before we do anything with it. + # a 1K string should be a reasonable start. + for i in 1 2 3 4 5 6 7 8; do + teststring=$teststring$teststring + done + SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} + # If test is not a shell built-in, we'll probably end up computing a + # maximum length that is only half of the actual maximum length, but + # we can't tell. + while { test X`env echo "$teststring$teststring" 2>/dev/null` \ + = "X$teststring$teststring"; } >/dev/null 2>&1 && + test 17 != "$i" # 1/2 MB should be enough + do + i=`expr $i + 1` + teststring=$teststring$teststring + done + # Only check the string length outside the loop. + lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` + teststring= + # Add a significant safety factor because C++ compilers can tack on + # massive amounts of additional arguments before passing them to the + # linker. It appears as though 1/2 is a usable value. 
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` + fi + ;; + esac + +fi + +if test -n "$lt_cv_sys_max_cmd_len"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sys_max_cmd_len" >&5 +printf "%s\n" "$lt_cv_sys_max_cmd_len" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none" >&5 +printf "%s\n" "none" >&6; } +fi +max_cmd_len=$lt_cv_sys_max_cmd_len + + + + + + +: ${CP="cp -f"} +: ${MV="mv -f"} +: ${RM="rm -f"} + +if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then + lt_unset=unset +else + lt_unset=false +fi + + + + + +# test EBCDIC or ASCII +case `echo X|tr X '\101'` in + A) # ASCII based system + # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr + lt_SP2NL='tr \040 \012' + lt_NL2SP='tr \015\012 \040\040' + ;; + *) # EBCDIC based system + lt_SP2NL='tr \100 \n' + lt_NL2SP='tr \r\n \100\100' + ;; +esac + + + + + + + + + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5 +printf %s "checking how to convert $build file names to $host format... " >&6; } +if test ${lt_cv_to_host_file_cmd+y} +then : + printf %s "(cached) " >&6 +else $as_nop + case $host in + *-*-mingw* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 + ;; + *-*-cygwin* ) + lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 + ;; + * ) # otherwise, assume *nix + lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 + ;; + esac + ;; + *-*-cygwin* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin + ;; + *-*-cygwin* ) + lt_cv_to_host_file_cmd=func_convert_file_noop + ;; + * ) # otherwise, assume *nix + lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin + ;; + esac + ;; + * ) # unhandled hosts (and "normal" native builds) + lt_cv_to_host_file_cmd=func_convert_file_noop + ;; +esac + +fi + +to_host_file_cmd=$lt_cv_to_host_file_cmd +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5 +printf "%s\n" "$lt_cv_to_host_file_cmd" >&6; } + + + + + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5 +printf %s "checking how to convert $build file names to toolchain format... " >&6; } +if test ${lt_cv_to_tool_file_cmd+y} +then : + printf %s "(cached) " >&6 +else $as_nop + #assume ordinary cross tools, or native build. +lt_cv_to_tool_file_cmd=func_convert_file_noop +case $host in + *-*-mingw* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 + ;; + esac + ;; +esac + +fi + +to_tool_file_cmd=$lt_cv_to_tool_file_cmd +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5 +printf "%s\n" "$lt_cv_to_tool_file_cmd" >&6; } + + + + + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 +printf %s "checking for $LD option to reload object files... 
" >&6; } +if test ${lt_cv_ld_reload_flag+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_ld_reload_flag='-r' +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_reload_flag" >&5 +printf "%s\n" "$lt_cv_ld_reload_flag" >&6; } +reload_flag=$lt_cv_ld_reload_flag +case $reload_flag in +"" | " "*) ;; +*) reload_flag=" $reload_flag" ;; +esac +reload_cmds='$LD$reload_flag -o $output$reload_objs' +case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + if test yes != "$GCC"; then + reload_cmds=false + fi + ;; + darwin*) + if test yes = "$GCC"; then + reload_cmds='$LTCC $LTCFLAGS -nostdlib $wl-r -o $output$reload_objs' + else + reload_cmds='$LD$reload_flag -o $output$reload_objs' + fi + ;; +esac + + + + + + + + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}file", so it can be a program name with args. +set dummy ${ac_tool_prefix}file; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_FILECMD+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$FILECMD"; then + ac_cv_prog_FILECMD="$FILECMD" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_FILECMD="${ac_tool_prefix}file" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +FILECMD=$ac_cv_prog_FILECMD +if test -n "$FILECMD"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $FILECMD" >&5 +printf "%s\n" "$FILECMD" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_FILECMD"; then + ac_ct_FILECMD=$FILECMD + # Extract the first word of "file", so it can be a program name with args. +set dummy file; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_FILECMD+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_FILECMD"; then + ac_cv_prog_ac_ct_FILECMD="$ac_ct_FILECMD" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_FILECMD="file" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_FILECMD=$ac_cv_prog_ac_ct_FILECMD +if test -n "$ac_ct_FILECMD"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_FILECMD" >&5 +printf "%s\n" "$ac_ct_FILECMD" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + if test "x$ac_ct_FILECMD" = x; then + FILECMD=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + FILECMD=$ac_ct_FILECMD + fi +else + FILECMD="$ac_cv_prog_FILECMD" +fi + + + + + + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args. +set dummy ${ac_tool_prefix}objdump; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_OBJDUMP+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$OBJDUMP"; then + ac_cv_prog_OBJDUMP="$OBJDUMP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +OBJDUMP=$ac_cv_prog_OBJDUMP +if test -n "$OBJDUMP"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $OBJDUMP" >&5 +printf "%s\n" "$OBJDUMP" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_OBJDUMP"; then + ac_ct_OBJDUMP=$OBJDUMP + # Extract the first word of "objdump", so it can be a program name with args. +set dummy objdump; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_OBJDUMP+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_OBJDUMP"; then + ac_cv_prog_ac_ct_OBJDUMP="$ac_ct_OBJDUMP" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_OBJDUMP="objdump" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_OBJDUMP=$ac_cv_prog_ac_ct_OBJDUMP +if test -n "$ac_ct_OBJDUMP"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OBJDUMP" >&5 +printf "%s\n" "$ac_ct_OBJDUMP" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + if test "x$ac_ct_OBJDUMP" = x; then + OBJDUMP="false" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + OBJDUMP=$ac_ct_OBJDUMP + fi +else + OBJDUMP="$ac_cv_prog_OBJDUMP" +fi + +test -z "$OBJDUMP" && OBJDUMP=objdump + + + + + + + + + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to recognize dependent libraries" >&5 +printf %s "checking how to recognize dependent libraries... " >&6; } +if test ${lt_cv_deplibs_check_method+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_file_magic_cmd='$MAGIC_CMD' +lt_cv_file_magic_test_file= +lt_cv_deplibs_check_method='unknown' +# Need to set the preceding variable on all platforms that support +# interlibrary dependencies. +# 'none' -- dependencies not supported. +# 'unknown' -- same as none, but documents that we really don't know. +# 'pass_all' -- all dependencies passed with no checks. +# 'test_compile' -- check by making test program. +# 'file_magic [[regex]]' -- check by looking for files in library path +# that responds to the $file_magic_cmd with a given extended regex. +# If you have 'file' or equivalent on your system and you're not sure +# whether 'pass_all' will *always* work, you probably want this one. + +case $host_os in +aix[4-9]*) + lt_cv_deplibs_check_method=pass_all + ;; + +beos*) + lt_cv_deplibs_check_method=pass_all + ;; + +bsdi[45]*) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)' + lt_cv_file_magic_cmd='$FILECMD -L' + lt_cv_file_magic_test_file=/shlib/libc.so + ;; + +cygwin*) + # func_win32_libid is a shell function defined in ltmain.sh + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + ;; + +mingw* | pw32*) + # Base MSYS/MinGW do not provide the 'file' command needed by + # func_win32_libid shell function, so use a weaker test based on 'objdump', + # unless we find 'file', for example because we are cross-compiling. + if ( file / ) >/dev/null 2>&1; then + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + else + # Keep this pattern in sync with the one in func_win32_libid. + lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' + lt_cv_file_magic_cmd='$OBJDUMP -f' + fi + ;; + +cegcc*) + # use the weaker test based on 'objdump'. See mingw*. + lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' 
+ lt_cv_file_magic_cmd='$OBJDUMP -f' + ;; + +darwin* | rhapsody*) + lt_cv_deplibs_check_method=pass_all + ;; + +freebsd* | dragonfly* | midnightbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then + case $host_cpu in + i*86 ) + # Not sure whether the presence of OpenBSD here was a mistake. + # Let's accept both of them until this is cleared up. + lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[3-9]86 (compact )?demand paged shared library' + lt_cv_file_magic_cmd=$FILECMD + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` + ;; + esac + else + lt_cv_deplibs_check_method=pass_all + fi + ;; + +haiku*) + lt_cv_deplibs_check_method=pass_all + ;; + +hpux10.20* | hpux11*) + lt_cv_file_magic_cmd=$FILECMD + case $host_cpu in + ia64*) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - IA64' + lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so + ;; + hppa*64*) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]' + lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl + ;; + *) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|PA-RISC[0-9]\.[0-9]) shared library' + lt_cv_file_magic_test_file=/usr/lib/libc.sl + ;; + esac + ;; + +interix[3-9]*) + # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|\.a)$' + ;; + +irix5* | irix6* | nonstopux*) + case $LD in + *-32|*"-32 ") libmagic=32-bit;; + *-n32|*"-n32 ") libmagic=N32;; + *-64|*"-64 ") libmagic=64-bit;; + *) libmagic=never-match;; + esac + lt_cv_deplibs_check_method=pass_all + ;; + +# This must be glibc/ELF. +linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + lt_cv_deplibs_check_method=pass_all + ;; + +netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|_pic\.a)$' + fi + ;; + +newos6*) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (executable|dynamic lib)' + lt_cv_file_magic_cmd=$FILECMD + lt_cv_file_magic_test_file=/usr/lib/libnls.so + ;; + +*nto* | *qnx*) + lt_cv_deplibs_check_method=pass_all + ;; + +openbsd* | bitrig*) + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|\.so|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' + fi + ;; + +osf3* | osf4* | osf5*) + lt_cv_deplibs_check_method=pass_all + ;; + +rdos*) + lt_cv_deplibs_check_method=pass_all + ;; + +solaris*) + lt_cv_deplibs_check_method=pass_all + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + lt_cv_deplibs_check_method=pass_all + ;; + +sysv4 | sysv4.3*) + case $host_vendor in + motorola) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib) M[0-9][0-9]* Version [0-9]' + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` + ;; + ncr) + lt_cv_deplibs_check_method=pass_all + ;; + sequent) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )' + ;; + sni) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib" + lt_cv_file_magic_test_file=/lib/libc.so + ;; + siemens) + lt_cv_deplibs_check_method=pass_all + 
;; + pc) + lt_cv_deplibs_check_method=pass_all + ;; + esac + ;; + +tpf*) + lt_cv_deplibs_check_method=pass_all + ;; +os2*) + lt_cv_deplibs_check_method=pass_all + ;; +esac + +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 +printf "%s\n" "$lt_cv_deplibs_check_method" >&6; } + +file_magic_glob= +want_nocaseglob=no +if test "$build" = "$host"; then + case $host_os in + mingw* | pw32*) + if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then + want_nocaseglob=yes + else + file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"` + fi + ;; + esac +fi + +file_magic_cmd=$lt_cv_file_magic_cmd +deplibs_check_method=$lt_cv_deplibs_check_method +test -z "$deplibs_check_method" && deplibs_check_method=unknown + + + + + + + + + + + + + + + + + + + + + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. +set dummy ${ac_tool_prefix}dlltool; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_DLLTOOL+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$DLLTOOL"; then + ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +DLLTOOL=$ac_cv_prog_DLLTOOL +if test -n "$DLLTOOL"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 +printf "%s\n" "$DLLTOOL" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_DLLTOOL"; then + ac_ct_DLLTOOL=$DLLTOOL + # Extract the first word of "dlltool", so it can be a program name with args. +set dummy dlltool; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_DLLTOOL+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_DLLTOOL"; then + ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_DLLTOOL="dlltool" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL +if test -n "$ac_ct_DLLTOOL"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 +printf "%s\n" "$ac_ct_DLLTOOL" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + if test "x$ac_ct_DLLTOOL" = x; then + DLLTOOL="false" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + DLLTOOL=$ac_ct_DLLTOOL + fi +else + DLLTOOL="$ac_cv_prog_DLLTOOL" +fi + +test -z "$DLLTOOL" && DLLTOOL=dlltool + + + + + + + + + + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5 +printf %s "checking how to associate runtime and link libraries... " >&6; } +if test ${lt_cv_sharedlib_from_linklib_cmd+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_sharedlib_from_linklib_cmd='unknown' + +case $host_os in +cygwin* | mingw* | pw32* | cegcc*) + # two different shell functions defined in ltmain.sh; + # decide which one to use based on capabilities of $DLLTOOL + case `$DLLTOOL --help 2>&1` in + *--identify-strict*) + lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib + ;; + *) + lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback + ;; + esac + ;; +*) + # fallback: assume linklib IS sharedlib + lt_cv_sharedlib_from_linklib_cmd=$ECHO + ;; +esac + +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5 +printf "%s\n" "$lt_cv_sharedlib_from_linklib_cmd" >&6; } +sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd +test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO + + + + + + + + +if test -n "$ac_tool_prefix"; then + for ac_prog in ar + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_AR+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$AR"; then + ac_cv_prog_AR="$AR" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_AR="$ac_tool_prefix$ac_prog" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +AR=$ac_cv_prog_AR +if test -n "$AR"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $AR" >&5 +printf "%s\n" "$AR" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + + test -n "$AR" && break + done +fi +if test -z "$AR"; then + ac_ct_AR=$AR + for ac_prog in ar +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_AR+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_AR"; then + ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_AR="$ac_prog" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_AR=$ac_cv_prog_ac_ct_AR +if test -n "$ac_ct_AR"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5 +printf "%s\n" "$ac_ct_AR" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + + test -n "$ac_ct_AR" && break +done + + if test "x$ac_ct_AR" = x; then + AR="false" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + AR=$ac_ct_AR + fi +fi + +: ${AR=ar} + + + + + + +# Use ARFLAGS variable as AR's operation code to sync the variable naming with +# Automake. If both AR_FLAGS and ARFLAGS are specified, AR_FLAGS should have +# higher priority because thats what people were doing historically (setting +# ARFLAGS for automake and AR_FLAGS for libtool). FIXME: Make the AR_FLAGS +# variable obsoleted/removed. + +test ${AR_FLAGS+y} || AR_FLAGS=${ARFLAGS-cr} +lt_ar_flags=$AR_FLAGS + + + + + + +# Make AR_FLAGS overridable by 'make ARFLAGS='. Don't try to run-time override +# by AR_FLAGS because that was never working and AR_FLAGS is about to die. + + + + + + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5 +printf %s "checking for archiver @FILE support... " >&6; } +if test ${lt_cv_ar_at_file+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_ar_at_file=no + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main (void) +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO" +then : + echo conftest.$ac_objext > conftest.lst + lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5' + { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 + (eval $lt_ar_try) 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + if test 0 -eq "$ac_status"; then + # Ensure the archiver fails upon bogus file names. + rm -f conftest.$ac_objext libconftest.a + { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 + (eval $lt_ar_try) 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + if test 0 -ne "$ac_status"; then + lt_cv_ar_at_file=@ + fi + fi + rm -f conftest.* libconftest.a + +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext + +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5 +printf "%s\n" "$lt_cv_ar_at_file" >&6; } + +if test no = "$lt_cv_ar_at_file"; then + archiver_list_spec= +else + archiver_list_spec=$lt_cv_ar_at_file +fi + + + + + + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. +set dummy ${ac_tool_prefix}strip; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_STRIP+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$STRIP"; then + ac_cv_prog_STRIP="$STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_STRIP="${ac_tool_prefix}strip" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +STRIP=$ac_cv_prog_STRIP +if test -n "$STRIP"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 +printf "%s\n" "$STRIP" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_STRIP"; then + ac_ct_STRIP=$STRIP + # Extract the first word of "strip", so it can be a program name with args. +set dummy strip; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_STRIP+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_STRIP"; then + ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_STRIP="strip" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP +if test -n "$ac_ct_STRIP"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 +printf "%s\n" "$ac_ct_STRIP" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + if test "x$ac_ct_STRIP" = x; then + STRIP=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + STRIP=$ac_ct_STRIP + fi +else + STRIP="$ac_cv_prog_STRIP" +fi + +test -z "$STRIP" && STRIP=: + + + + + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. +set dummy ${ac_tool_prefix}ranlib; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_RANLIB+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$RANLIB"; then + ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +RANLIB=$ac_cv_prog_RANLIB +if test -n "$RANLIB"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 +printf "%s\n" "$RANLIB" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_RANLIB"; then + ac_ct_RANLIB=$RANLIB + # Extract the first word of "ranlib", so it can be a program name with args. +set dummy ranlib; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_RANLIB+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_RANLIB"; then + ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_RANLIB="ranlib" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB +if test -n "$ac_ct_RANLIB"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5 +printf "%s\n" "$ac_ct_RANLIB" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + if test "x$ac_ct_RANLIB" = x; then + RANLIB=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + RANLIB=$ac_ct_RANLIB + fi +else + RANLIB="$ac_cv_prog_RANLIB" +fi + +test -z "$RANLIB" && RANLIB=: + + + + + + +# Determine commands to create old-style static archives. +old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' +old_postinstall_cmds='chmod 644 $oldlib' +old_postuninstall_cmds= + +if test -n "$RANLIB"; then + case $host_os in + bitrig* | openbsd*) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib" + ;; + *) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib" + ;; + esac + old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib" +fi + +case $host_os in + darwin*) + lock_old_archive_extraction=yes ;; + *) + lock_old_archive_extraction=no ;; +esac + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC + + +# Check for command to grab the raw symbol name followed by C symbol from nm. +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking command to parse $NM output from $compiler object" >&5 +printf %s "checking command to parse $NM output from $compiler object... " >&6; } +if test ${lt_cv_sys_global_symbol_pipe+y} +then : + printf %s "(cached) " >&6 +else $as_nop + +# These are sane defaults that work on at least a few old systems. +# [They come from Ultrix. What could be older than Ultrix?!! ;)] + +# Character class describing NM global symbol codes. +symcode='[BCDEGRST]' + +# Regexp to match symbols that can be accessed directly from C. +sympat='\([_A-Za-z][_A-Za-z0-9]*\)' + +# Define system-specific variables. +case $host_os in +aix*) + symcode='[BCDT]' + ;; +cygwin* | mingw* | pw32* | cegcc*) + symcode='[ABCDGISTW]' + ;; +hpux*) + if test ia64 = "$host_cpu"; then + symcode='[ABCDEGRST]' + fi + ;; +irix* | nonstopux*) + symcode='[BCDEGRST]' + ;; +osf*) + symcode='[BCDEGQRST]' + ;; +solaris*) + symcode='[BDRT]' + ;; +sco3.2v5*) + symcode='[DT]' + ;; +sysv4.2uw2*) + symcode='[DT]' + ;; +sysv5* | sco5v6* | unixware* | OpenUNIX*) + symcode='[ABDT]' + ;; +sysv4) + symcode='[DFNSTU]' + ;; +esac + +# If we're using GNU nm, then use its standard symbol codes. +case `$NM -V 2>&1` in +*GNU* | *'with BFD'*) + symcode='[ABCDGIRSTW]' ;; +esac + +if test "$lt_cv_nm_interface" = "MS dumpbin"; then + # Gets list of data symbols to import. 
+ lt_cv_sys_global_symbol_to_import="$SED -n -e 's/^I .* \(.*\)$/\1/p'" + # Adjust the below global symbol transforms to fixup imported variables. + lt_cdecl_hook=" -e 's/^I .* \(.*\)$/extern __declspec(dllimport) char \1;/p'" + lt_c_name_hook=" -e 's/^I .* \(.*\)$/ {\"\1\", (void *) 0},/p'" + lt_c_name_lib_hook="\ + -e 's/^I .* \(lib.*\)$/ {\"\1\", (void *) 0},/p'\ + -e 's/^I .* \(.*\)$/ {\"lib\1\", (void *) 0},/p'" +else + # Disable hooks by default. + lt_cv_sys_global_symbol_to_import= + lt_cdecl_hook= + lt_c_name_hook= + lt_c_name_lib_hook= +fi + +# Transform an extracted symbol line into a proper C declaration. +# Some systems (esp. on ia64) link data and code symbols differently, +# so use this general approach. +lt_cv_sys_global_symbol_to_cdecl="$SED -n"\ +$lt_cdecl_hook\ +" -e 's/^T .* \(.*\)$/extern int \1();/p'"\ +" -e 's/^$symcode$symcode* .* \(.*\)$/extern char \1;/p'" + +# Transform an extracted symbol line into symbol name and symbol address +lt_cv_sys_global_symbol_to_c_name_address="$SED -n"\ +$lt_c_name_hook\ +" -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ +" -e 's/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/p'" + +# Transform an extracted symbol line into symbol name with lib prefix and +# symbol address. +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="$SED -n"\ +$lt_c_name_lib_hook\ +" -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ +" -e 's/^$symcode$symcode* .* \(lib.*\)$/ {\"\1\", (void *) \&\1},/p'"\ +" -e 's/^$symcode$symcode* .* \(.*\)$/ {\"lib\1\", (void *) \&\1},/p'" + +# Handle CRLF in mingw tool chain +opt_cr= +case $build_os in +mingw*) + opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp + ;; +esac + +# Try without a prefix underscore, then with it. +for ac_symprfx in "" "_"; do + + # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. + symxfrm="\\1 $ac_symprfx\\2 \\2" + + # Write the raw and C identifiers. + if test "$lt_cv_nm_interface" = "MS dumpbin"; then + # Fake it for dumpbin and say T for any non-static function, + # D for any global variable and I for any imported variable. + # Also find C++ and __fastcall symbols from MSVC++ or ICC, + # which start with @ or ?. + lt_cv_sys_global_symbol_pipe="$AWK '"\ +" {last_section=section; section=\$ 3};"\ +" /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\ +" /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ +" /^ *Symbol name *: /{split(\$ 0,sn,\":\"); si=substr(sn[2],2)};"\ +" /^ *Type *: code/{print \"T\",si,substr(si,length(prfx))};"\ +" /^ *Type *: data/{print \"I\",si,substr(si,length(prfx))};"\ +" \$ 0!~/External *\|/{next};"\ +" / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ +" {if(hide[section]) next};"\ +" {f=\"D\"}; \$ 0~/\(\).*\|/{f=\"T\"};"\ +" {split(\$ 0,a,/\||\r/); split(a[2],s)};"\ +" s[1]~/^[@?]/{print f,s[1],s[1]; next};"\ +" s[1]~prfx {split(s[1],t,\"@\"); print f,t[1],substr(t[1],length(prfx))}"\ +" ' prfx=^$ac_symprfx" + else + lt_cv_sys_global_symbol_pipe="$SED -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" + fi + lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | $SED '/ __gnu_lto/d'" + + # Check to see that the pipe works correctly. 
+ pipe_works=no + + rm -f conftest* + cat > conftest.$ac_ext <<_LT_EOF +#ifdef __cplusplus +extern "C" { +#endif +char nm_test_var; +void nm_test_func(void); +void nm_test_func(void){} +#ifdef __cplusplus +} +#endif +int main(){nm_test_var='a';nm_test_func();return(0);} +_LT_EOF + + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + # Now try to grab the symbols. + nlist=conftest.nm + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist\""; } >&5 + (eval $NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && test -s "$nlist"; then + # Try sorting and uniquifying the output. + if sort "$nlist" | uniq > "$nlist"T; then + mv -f "$nlist"T "$nlist" + else + rm -f "$nlist"T + fi + + # Make sure that we snagged all the symbols we need. + if $GREP ' nm_test_var$' "$nlist" >/dev/null; then + if $GREP ' nm_test_func$' "$nlist" >/dev/null; then + cat <<_LT_EOF > conftest.$ac_ext +/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ +#if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE +/* DATA imports from DLLs on WIN32 can't be const, because runtime + relocations are performed -- see ld's documentation on pseudo-relocs. */ +# define LT_DLSYM_CONST +#elif defined __osf__ +/* This system does not cope well with relocations in const data. */ +# define LT_DLSYM_CONST +#else +# define LT_DLSYM_CONST const +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +_LT_EOF + # Now generate the symbol file. + eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' + + cat <<_LT_EOF >> conftest.$ac_ext + +/* The mapping between symbol names and symbols. */ +LT_DLSYM_CONST struct { + const char *name; + void *address; +} +lt__PROGRAM__LTX_preloaded_symbols[] = +{ + { "@PROGRAM@", (void *) 0 }, +_LT_EOF + $SED "s/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext + cat <<\_LT_EOF >> conftest.$ac_ext + {0, (void *) 0} +}; + +/* This works around a problem in FreeBSD linker */ +#ifdef FREEBSD_WORKAROUND +static const void *lt_preloaded_setup() { + return lt__PROGRAM__LTX_preloaded_symbols; +} +#endif + +#ifdef __cplusplus +} +#endif +_LT_EOF + # Now try linking the two files. + mv conftest.$ac_objext conftstm.$ac_objext + lt_globsym_save_LIBS=$LIBS + lt_globsym_save_CFLAGS=$CFLAGS + LIBS=conftstm.$ac_objext + CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 + (eval $ac_link) 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && test -s conftest$ac_exeext; then + pipe_works=yes + fi + LIBS=$lt_globsym_save_LIBS + CFLAGS=$lt_globsym_save_CFLAGS + else + echo "cannot find nm_test_func in $nlist" >&5 + fi + else + echo "cannot find nm_test_var in $nlist" >&5 + fi + else + echo "cannot run $lt_cv_sys_global_symbol_pipe" >&5 + fi + else + echo "$progname: failed program was:" >&5 + cat conftest.$ac_ext >&5 + fi + rm -rf conftest* conftst* + + # Do not use the global_symbol_pipe unless it works. 
+ if test yes = "$pipe_works"; then + break + else + lt_cv_sys_global_symbol_pipe= + fi +done + +fi + +if test -z "$lt_cv_sys_global_symbol_pipe"; then + lt_cv_sys_global_symbol_to_cdecl= +fi +if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: failed" >&5 +printf "%s\n" "failed" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: ok" >&5 +printf "%s\n" "ok" >&6; } +fi + +# Response file support. +if test "$lt_cv_nm_interface" = "MS dumpbin"; then + nm_file_list_spec='@' +elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then + nm_file_list_spec='@' +fi + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5 +printf %s "checking for sysroot... " >&6; } + +# Check whether --with-sysroot was given. +if test ${with_sysroot+y} +then : + withval=$with_sysroot; +else $as_nop + with_sysroot=no +fi + + +lt_sysroot= +case $with_sysroot in #( + yes) + if test yes = "$GCC"; then + lt_sysroot=`$CC --print-sysroot 2>/dev/null` + fi + ;; #( + /*) + lt_sysroot=`echo "$with_sysroot" | $SED -e "$sed_quote_subst"` + ;; #( + no|'') + ;; #( + *) + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $with_sysroot" >&5 +printf "%s\n" "$with_sysroot" >&6; } + as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5 + ;; +esac + + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5 +printf "%s\n" "${lt_sysroot:-no}" >&6; } + + + + + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for a working dd" >&5 +printf %s "checking for a working dd... " >&6; } +if test ${ac_cv_path_lt_DD+y} +then : + printf %s "(cached) " >&6 +else $as_nop + printf 0123456789abcdef0123456789abcdef >conftest.i +cat conftest.i conftest.i >conftest2.i +: ${lt_DD:=$DD} +if test -z "$lt_DD"; then + ac_path_lt_DD_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_prog in dd + do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_lt_DD="$as_dir$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_lt_DD" || continue +if "$ac_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then + cmp -s conftest.i conftest.out \ + && ac_cv_path_lt_DD="$ac_path_lt_DD" ac_path_lt_DD_found=: +fi + $ac_path_lt_DD_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_lt_DD"; then + : + fi +else + ac_cv_path_lt_DD=$lt_DD +fi + +rm -f conftest.i conftest2.i conftest.out +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_lt_DD" >&5 +printf "%s\n" "$ac_cv_path_lt_DD" >&6; } + + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to truncate binary pipes" >&5 +printf %s "checking how to truncate binary pipes... 
" >&6; } +if test ${lt_cv_truncate_bin+y} +then : + printf %s "(cached) " >&6 +else $as_nop + printf 0123456789abcdef0123456789abcdef >conftest.i +cat conftest.i conftest.i >conftest2.i +lt_cv_truncate_bin= +if "$ac_cv_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then + cmp -s conftest.i conftest.out \ + && lt_cv_truncate_bin="$ac_cv_path_lt_DD bs=4096 count=1" +fi +rm -f conftest.i conftest2.i conftest.out +test -z "$lt_cv_truncate_bin" && lt_cv_truncate_bin="$SED -e 4q" +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_truncate_bin" >&5 +printf "%s\n" "$lt_cv_truncate_bin" >&6; } + + + + + + + +# Calculate cc_basename. Skip known compiler wrappers and cross-prefix. +func_cc_basename () +{ + for cc_temp in $*""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac + done + func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` +} + +# Check whether --enable-libtool-lock was given. +if test ${enable_libtool_lock+y} +then : + enableval=$enable_libtool_lock; +fi + +test no = "$enable_libtool_lock" || enable_libtool_lock=yes + +# Some flags need to be propagated to the compiler or linker for good +# libtool support. +case $host in +ia64-*-hpux*) + # Find out what ABI is being produced by ac_compile, and set mode + # options accordingly. + echo 'int i;' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + case `$FILECMD conftest.$ac_objext` in + *ELF-32*) + HPUX_IA64_MODE=32 + ;; + *ELF-64*) + HPUX_IA64_MODE=64 + ;; + esac + fi + rm -rf conftest* + ;; +*-*-irix6*) + # Find out what ABI is being produced by ac_compile, and set linker + # options accordingly. + echo '#line '$LINENO' "configure"' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + if test yes = "$lt_cv_prog_gnu_ld"; then + case `$FILECMD conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -melf32bsmip" + ;; + *N32*) + LD="${LD-ld} -melf32bmipn32" + ;; + *64-bit*) + LD="${LD-ld} -melf64bmip" + ;; + esac + else + case `$FILECMD conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -32" + ;; + *N32*) + LD="${LD-ld} -n32" + ;; + *64-bit*) + LD="${LD-ld} -64" + ;; + esac + fi + fi + rm -rf conftest* + ;; + +mips64*-*linux*) + # Find out what ABI is being produced by ac_compile, and set linker + # options accordingly. + echo '#line '$LINENO' "configure"' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; }; then + emul=elf + case `$FILECMD conftest.$ac_objext` in + *32-bit*) + emul="${emul}32" + ;; + *64-bit*) + emul="${emul}64" + ;; + esac + case `$FILECMD conftest.$ac_objext` in + *MSB*) + emul="${emul}btsmip" + ;; + *LSB*) + emul="${emul}ltsmip" + ;; + esac + case `$FILECMD conftest.$ac_objext` in + *N32*) + emul="${emul}n32" + ;; + esac + LD="${LD-ld} -m $emul" + fi + rm -rf conftest* + ;; + +x86_64-*kfreebsd*-gnu|x86_64-*linux*|powerpc*-*linux*| \ +s390*-*linux*|s390*-*tpf*|sparc*-*linux*) + # Find out what ABI is being produced by ac_compile, and set linker + # options accordingly. Note that the listed cases only cover the + # situations where additional linker options are needed (such as when + # doing 32-bit compilation for a host where ld defaults to 64-bit, or + # vice versa); the common cases where no linker options are needed do + # not appear in the list. + echo 'int i;' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + case `$FILECMD conftest.o` in + *32-bit*) + case $host in + x86_64-*kfreebsd*-gnu) + LD="${LD-ld} -m elf_i386_fbsd" + ;; + x86_64-*linux*) + case `$FILECMD conftest.o` in + *x86-64*) + LD="${LD-ld} -m elf32_x86_64" + ;; + *) + LD="${LD-ld} -m elf_i386" + ;; + esac + ;; + powerpc64le-*linux*) + LD="${LD-ld} -m elf32lppclinux" + ;; + powerpc64-*linux*) + LD="${LD-ld} -m elf32ppclinux" + ;; + s390x-*linux*) + LD="${LD-ld} -m elf_s390" + ;; + sparc64-*linux*) + LD="${LD-ld} -m elf32_sparc" + ;; + esac + ;; + *64-bit*) + case $host in + x86_64-*kfreebsd*-gnu) + LD="${LD-ld} -m elf_x86_64_fbsd" + ;; + x86_64-*linux*) + LD="${LD-ld} -m elf_x86_64" + ;; + powerpcle-*linux*) + LD="${LD-ld} -m elf64lppc" + ;; + powerpc-*linux*) + LD="${LD-ld} -m elf64ppc" + ;; + s390*-*linux*|s390*-*tpf*) + LD="${LD-ld} -m elf64_s390" + ;; + sparc*-*linux*) + LD="${LD-ld} -m elf64_sparc" + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; + +*-*-sco3.2v5*) + # On SCO OpenServer 5, we need -belf to get full-featured binaries. + SAVE_CFLAGS=$CFLAGS + CFLAGS="$CFLAGS -belf" + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the C compiler needs -belf" >&5 +printf %s "checking whether the C compiler needs -belf... " >&6; } +if test ${lt_cv_cc_needs_belf+y} +then : + printf %s "(cached) " >&6 +else $as_nop + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main (void) +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO" +then : + lt_cv_cc_needs_belf=yes +else $as_nop + lt_cv_cc_needs_belf=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam \ + conftest$ac_exeext conftest.$ac_ext + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_cc_needs_belf" >&5 +printf "%s\n" "$lt_cv_cc_needs_belf" >&6; } + if test yes != "$lt_cv_cc_needs_belf"; then + # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf + CFLAGS=$SAVE_CFLAGS + fi + ;; +*-*solaris*) + # Find out what ABI is being produced by ac_compile, and set linker + # options accordingly. + echo 'int i;' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + case `$FILECMD conftest.o` in + *64-bit*) + case $lt_cv_prog_gnu_ld in + yes*) + case $host in + i?86-*-solaris*|x86_64-*-solaris*) + LD="${LD-ld} -m elf_x86_64" + ;; + sparc*-*-solaris*) + LD="${LD-ld} -m elf64_sparc" + ;; + esac + # GNU ld 2.21 introduced _sol2 emulations. Use them if available. + if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then + LD=${LD-ld}_sol2 + fi + ;; + *) + if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then + LD="${LD-ld} -64" + fi + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; +esac + +need_locks=$enable_libtool_lock + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args. +set dummy ${ac_tool_prefix}mt; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_MANIFEST_TOOL+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$MANIFEST_TOOL"; then + ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL +if test -n "$MANIFEST_TOOL"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5 +printf "%s\n" "$MANIFEST_TOOL" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_MANIFEST_TOOL"; then + ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL + # Extract the first word of "mt", so it can be a program name with args. +set dummy mt; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_MANIFEST_TOOL+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_MANIFEST_TOOL"; then + ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_MANIFEST_TOOL="mt" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL +if test -n "$ac_ct_MANIFEST_TOOL"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5 +printf "%s\n" "$ac_ct_MANIFEST_TOOL" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + if test "x$ac_ct_MANIFEST_TOOL" = x; then + MANIFEST_TOOL=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL + fi +else + MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL" +fi + +test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5 +printf %s "checking if $MANIFEST_TOOL is a manifest tool... " >&6; } +if test ${lt_cv_path_mainfest_tool+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_path_mainfest_tool=no + echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5 + $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out + cat conftest.err >&5 + if $GREP 'Manifest Tool' conftest.out > /dev/null; then + lt_cv_path_mainfest_tool=yes + fi + rm -f conftest* +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5 +printf "%s\n" "$lt_cv_path_mainfest_tool" >&6; } +if test yes != "$lt_cv_path_mainfest_tool"; then + MANIFEST_TOOL=: +fi + + + + + + + case $host_os in + rhapsody* | darwin*) + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}dsymutil", so it can be a program name with args. +set dummy ${ac_tool_prefix}dsymutil; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_DSYMUTIL+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$DSYMUTIL"; then + ac_cv_prog_DSYMUTIL="$DSYMUTIL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_DSYMUTIL="${ac_tool_prefix}dsymutil" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +DSYMUTIL=$ac_cv_prog_DSYMUTIL +if test -n "$DSYMUTIL"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $DSYMUTIL" >&5 +printf "%s\n" "$DSYMUTIL" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_DSYMUTIL"; then + ac_ct_DSYMUTIL=$DSYMUTIL + # Extract the first word of "dsymutil", so it can be a program name with args. 
+set dummy dsymutil; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_DSYMUTIL+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_DSYMUTIL"; then + ac_cv_prog_ac_ct_DSYMUTIL="$ac_ct_DSYMUTIL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_DSYMUTIL="dsymutil" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_DSYMUTIL=$ac_cv_prog_ac_ct_DSYMUTIL +if test -n "$ac_ct_DSYMUTIL"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DSYMUTIL" >&5 +printf "%s\n" "$ac_ct_DSYMUTIL" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + if test "x$ac_ct_DSYMUTIL" = x; then + DSYMUTIL=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + DSYMUTIL=$ac_ct_DSYMUTIL + fi +else + DSYMUTIL="$ac_cv_prog_DSYMUTIL" +fi + + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}nmedit", so it can be a program name with args. +set dummy ${ac_tool_prefix}nmedit; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_NMEDIT+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$NMEDIT"; then + ac_cv_prog_NMEDIT="$NMEDIT" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_NMEDIT="${ac_tool_prefix}nmedit" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +NMEDIT=$ac_cv_prog_NMEDIT +if test -n "$NMEDIT"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $NMEDIT" >&5 +printf "%s\n" "$NMEDIT" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_NMEDIT"; then + ac_ct_NMEDIT=$NMEDIT + # Extract the first word of "nmedit", so it can be a program name with args. +set dummy nmedit; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_NMEDIT+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_NMEDIT"; then + ac_cv_prog_ac_ct_NMEDIT="$ac_ct_NMEDIT" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_NMEDIT="nmedit" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_NMEDIT=$ac_cv_prog_ac_ct_NMEDIT +if test -n "$ac_ct_NMEDIT"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_NMEDIT" >&5 +printf "%s\n" "$ac_ct_NMEDIT" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + if test "x$ac_ct_NMEDIT" = x; then + NMEDIT=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + NMEDIT=$ac_ct_NMEDIT + fi +else + NMEDIT="$ac_cv_prog_NMEDIT" +fi + + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}lipo", so it can be a program name with args. +set dummy ${ac_tool_prefix}lipo; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_LIPO+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$LIPO"; then + ac_cv_prog_LIPO="$LIPO" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_LIPO="${ac_tool_prefix}lipo" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +LIPO=$ac_cv_prog_LIPO +if test -n "$LIPO"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $LIPO" >&5 +printf "%s\n" "$LIPO" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_LIPO"; then + ac_ct_LIPO=$LIPO + # Extract the first word of "lipo", so it can be a program name with args. +set dummy lipo; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_LIPO+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_LIPO"; then + ac_cv_prog_ac_ct_LIPO="$ac_ct_LIPO" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_LIPO="lipo" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_LIPO=$ac_cv_prog_ac_ct_LIPO +if test -n "$ac_ct_LIPO"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_LIPO" >&5 +printf "%s\n" "$ac_ct_LIPO" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + if test "x$ac_ct_LIPO" = x; then + LIPO=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + LIPO=$ac_ct_LIPO + fi +else + LIPO="$ac_cv_prog_LIPO" +fi + + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}otool", so it can be a program name with args. +set dummy ${ac_tool_prefix}otool; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_OTOOL+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$OTOOL"; then + ac_cv_prog_OTOOL="$OTOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_OTOOL="${ac_tool_prefix}otool" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +OTOOL=$ac_cv_prog_OTOOL +if test -n "$OTOOL"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $OTOOL" >&5 +printf "%s\n" "$OTOOL" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_OTOOL"; then + ac_ct_OTOOL=$OTOOL + # Extract the first word of "otool", so it can be a program name with args. +set dummy otool; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_OTOOL+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_OTOOL"; then + ac_cv_prog_ac_ct_OTOOL="$ac_ct_OTOOL" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_OTOOL="otool" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_OTOOL=$ac_cv_prog_ac_ct_OTOOL +if test -n "$ac_ct_OTOOL"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL" >&5 +printf "%s\n" "$ac_ct_OTOOL" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + if test "x$ac_ct_OTOOL" = x; then + OTOOL=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + OTOOL=$ac_ct_OTOOL + fi +else + OTOOL="$ac_cv_prog_OTOOL" +fi + + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}otool64", so it can be a program name with args. +set dummy ${ac_tool_prefix}otool64; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_OTOOL64+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$OTOOL64"; then + ac_cv_prog_OTOOL64="$OTOOL64" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_OTOOL64="${ac_tool_prefix}otool64" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +OTOOL64=$ac_cv_prog_OTOOL64 +if test -n "$OTOOL64"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $OTOOL64" >&5 +printf "%s\n" "$OTOOL64" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_OTOOL64"; then + ac_ct_OTOOL64=$OTOOL64 + # Extract the first word of "otool64", so it can be a program name with args. +set dummy otool64; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_OTOOL64+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_OTOOL64"; then + ac_cv_prog_ac_ct_OTOOL64="$ac_ct_OTOOL64" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_OTOOL64="otool64" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_OTOOL64=$ac_cv_prog_ac_ct_OTOOL64 +if test -n "$ac_ct_OTOOL64"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL64" >&5 +printf "%s\n" "$ac_ct_OTOOL64" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + if test "x$ac_ct_OTOOL64" = x; then + OTOOL64=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + OTOOL64=$ac_ct_OTOOL64 + fi +else + OTOOL64="$ac_cv_prog_OTOOL64" +fi + + + + + + + + + + + + + + + + + + + + + + + + + + + + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for -single_module linker flag" >&5 +printf %s "checking for -single_module linker flag... " >&6; } +if test ${lt_cv_apple_cc_single_mod+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_apple_cc_single_mod=no + if test -z "$LT_MULTI_MODULE"; then + # By default we will add the -single_module flag. You can override + # by either setting the environment variable LT_MULTI_MODULE + # non-empty at configure time, or by adding -multi_module to the + # link flags. + rm -rf libconftest.dylib* + echo "int foo(void){return 1;}" > conftest.c + echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ +-dynamiclib -Wl,-single_module conftest.c" >&5 + $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ + -dynamiclib -Wl,-single_module conftest.c 2>conftest.err + _lt_result=$? + # If there is a non-empty error log, and "single_module" + # appears in it, assume the flag caused a linker warning + if test -s conftest.err && $GREP single_module conftest.err; then + cat conftest.err >&5 + # Otherwise, if the output was created with a 0 exit code from + # the compiler, it worked. + elif test -f libconftest.dylib && test 0 = "$_lt_result"; then + lt_cv_apple_cc_single_mod=yes + else + cat conftest.err >&5 + fi + rm -rf libconftest.dylib* + rm -f conftest.* + fi +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_apple_cc_single_mod" >&5 +printf "%s\n" "$lt_cv_apple_cc_single_mod" >&6; } + + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for -exported_symbols_list linker flag" >&5 +printf %s "checking for -exported_symbols_list linker flag... " >&6; } +if test ${lt_cv_ld_exported_symbols_list+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_ld_exported_symbols_list=no + save_LDFLAGS=$LDFLAGS + echo "_main" > conftest.sym + LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main (void) +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO" +then : + lt_cv_ld_exported_symbols_list=yes +else $as_nop + lt_cv_ld_exported_symbols_list=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS=$save_LDFLAGS + +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_exported_symbols_list" >&5 +printf "%s\n" "$lt_cv_ld_exported_symbols_list" >&6; } + + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for -force_load linker flag" >&5 +printf %s "checking for -force_load linker flag... " >&6; } +if test ${lt_cv_ld_force_load+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_ld_force_load=no + cat > conftest.c << _LT_EOF +int forced_loaded() { return 2;} +_LT_EOF + echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&5 + $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5 + echo "$AR $AR_FLAGS libconftest.a conftest.o" >&5 + $AR $AR_FLAGS libconftest.a conftest.o 2>&5 + echo "$RANLIB libconftest.a" >&5 + $RANLIB libconftest.a 2>&5 + cat > conftest.c << _LT_EOF +int main() { return 0;} +_LT_EOF + echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&5 + $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err + _lt_result=$? + if test -s conftest.err && $GREP force_load conftest.err; then + cat conftest.err >&5 + elif test -f conftest && test 0 = "$_lt_result" && $GREP forced_load conftest >/dev/null 2>&1; then + lt_cv_ld_force_load=yes + else + cat conftest.err >&5 + fi + rm -f conftest.err libconftest.a conftest conftest.c + rm -rf conftest.dSYM + +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_force_load" >&5 +printf "%s\n" "$lt_cv_ld_force_load" >&6; } + case $host_os in + rhapsody* | darwin1.[012]) + _lt_dar_allow_undefined='$wl-undefined ${wl}suppress' ;; + darwin1.*) + _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; + darwin*) + case $MACOSX_DEPLOYMENT_TARGET,$host in + 10.[012],*|,*powerpc*-darwin[5-8]*) + _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; + *) + _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;; + esac + ;; + esac + if test yes = "$lt_cv_apple_cc_single_mod"; then + _lt_dar_single_mod='$single_module' + fi + if test yes = "$lt_cv_ld_exported_symbols_list"; then + _lt_dar_export_syms=' $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym' + else + _lt_dar_export_syms='~$NMEDIT -s $output_objdir/$libname-symbols.expsym $lib' + fi + if test : != "$DSYMUTIL" && test no = "$lt_cv_ld_force_load"; then + _lt_dsymutil='~$DSYMUTIL $lib || :' + else + _lt_dsymutil= + fi + ;; + esac + +# func_munge_path_list VARIABLE PATH +# ----------------------------------- +# VARIABLE is name of variable containing _space_ separated list of +# directories to be munged by the contents of PATH, which is string +# having a format: +# "DIR[:DIR]:" +# string "DIR[ DIR]" will be prepended to VARIABLE +# ":DIR[:DIR]" +# string "DIR[ DIR]" will be appended to VARIABLE +# "DIRP[:DIRP]::[DIRA:]DIRA" +# string "DIRP[ DIRP]" will be prepended to VARIABLE and string +# "DIRA[ DIRA]" will be appended to VARIABLE +# "DIR[:DIR]" +# VARIABLE will be replaced by "DIR[ DIR]" +func_munge_path_list () +{ + case x$2 in + x) + ;; + *:) + eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\" + ;; + x:*) + eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\" + ;; + *::*) + eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\" + eval 
$1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\" + ;; + *) + eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\" + ;; + esac +} + +ac_header= ac_cache= +for ac_item in $ac_header_c_list +do + if test $ac_cache; then + ac_fn_c_check_header_compile "$LINENO" $ac_header ac_cv_header_$ac_cache "$ac_includes_default" + if eval test \"x\$ac_cv_header_$ac_cache\" = xyes; then + printf "%s\n" "#define $ac_item 1" >> confdefs.h + fi + ac_header= ac_cache= + elif test $ac_header; then + ac_cache=$ac_item + else + ac_header=$ac_item + fi +done + + + + + + + + +if test $ac_cv_header_stdlib_h = yes && test $ac_cv_header_string_h = yes +then : + +printf "%s\n" "#define STDC_HEADERS 1" >>confdefs.h + +fi +ac_fn_c_check_header_compile "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default +" +if test "x$ac_cv_header_dlfcn_h" = xyes +then : + printf "%s\n" "#define HAVE_DLFCN_H 1" >>confdefs.h + +fi + + + + + +# Set options + + + + enable_dlopen=no + + + enable_win32_dll=no + + + # Check whether --enable-shared was given. +if test ${enable_shared+y} +then : + enableval=$enable_shared; p=${PACKAGE-default} + case $enableval in + yes) enable_shared=yes ;; + no) enable_shared=no ;; + *) + enable_shared=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, + for pkg in $enableval; do + IFS=$lt_save_ifs + if test "X$pkg" = "X$p"; then + enable_shared=yes + fi + done + IFS=$lt_save_ifs + ;; + esac +else $as_nop + enable_shared=yes +fi + + + + + + + + + + # Check whether --enable-static was given. +if test ${enable_static+y} +then : + enableval=$enable_static; p=${PACKAGE-default} + case $enableval in + yes) enable_static=yes ;; + no) enable_static=no ;; + *) + enable_static=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, + for pkg in $enableval; do + IFS=$lt_save_ifs + if test "X$pkg" = "X$p"; then + enable_static=yes + fi + done + IFS=$lt_save_ifs + ;; + esac +else $as_nop + enable_static=yes +fi + + + + + + + + + + +# Check whether --with-pic was given. +if test ${with_pic+y} +then : + withval=$with_pic; lt_p=${PACKAGE-default} + case $withval in + yes|no) pic_mode=$withval ;; + *) + pic_mode=default + # Look at the argument we got. We use all the common list separators. + lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, + for lt_pkg in $withval; do + IFS=$lt_save_ifs + if test "X$lt_pkg" = "X$lt_p"; then + pic_mode=yes + fi + done + IFS=$lt_save_ifs + ;; + esac +else $as_nop + pic_mode=default +fi + + + + + + + + + # Check whether --enable-fast-install was given. +if test ${enable_fast_install+y} +then : + enableval=$enable_fast_install; p=${PACKAGE-default} + case $enableval in + yes) enable_fast_install=yes ;; + no) enable_fast_install=no ;; + *) + enable_fast_install=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, + for pkg in $enableval; do + IFS=$lt_save_ifs + if test "X$pkg" = "X$p"; then + enable_fast_install=yes + fi + done + IFS=$lt_save_ifs + ;; + esac +else $as_nop + enable_fast_install=yes +fi + + + + + + + + + shared_archive_member_spec= +case $host,$enable_shared in +power*-*-aix[5-9]*,yes) + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking which variant of shared library versioning to provide" >&5 +printf %s "checking which variant of shared library versioning to provide... " >&6; } + +# Check whether --with-aix-soname was given. 
+if test ${with_aix_soname+y} +then : + withval=$with_aix_soname; case $withval in + aix|svr4|both) + ;; + *) + as_fn_error $? "Unknown argument to --with-aix-soname" "$LINENO" 5 + ;; + esac + lt_cv_with_aix_soname=$with_aix_soname +else $as_nop + if test ${lt_cv_with_aix_soname+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_with_aix_soname=aix +fi + + with_aix_soname=$lt_cv_with_aix_soname +fi + + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $with_aix_soname" >&5 +printf "%s\n" "$with_aix_soname" >&6; } + if test aix != "$with_aix_soname"; then + # For the AIX way of multilib, we name the shared archive member + # based on the bitwidth used, traditionally 'shr.o' or 'shr_64.o', + # and 'shr.imp' or 'shr_64.imp', respectively, for the Import File. + # Even when GNU compilers ignore OBJECT_MODE but need '-maix64' flag, + # the AIX toolchain works better with OBJECT_MODE set (default 32). + if test 64 = "${OBJECT_MODE-32}"; then + shared_archive_member_spec=shr_64 + else + shared_archive_member_spec=shr + fi + fi + ;; +*) + with_aix_soname=aix + ;; +esac + + + + + + + + + + +# This can be used to rebuild libtool when needed +LIBTOOL_DEPS=$ltmain + +# Always use our own libtool. +LIBTOOL='$(SHELL) $(top_builddir)/libtool' + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +test -z "$LN_S" && LN_S="ln -s" + + + + + + + + + + + + + + +if test -n "${ZSH_VERSION+set}"; then + setopt NO_GLOB_SUBST +fi + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for objdir" >&5 +printf %s "checking for objdir... " >&6; } +if test ${lt_cv_objdir+y} +then : + printf %s "(cached) " >&6 +else $as_nop + rm -f .libs 2>/dev/null +mkdir .libs 2>/dev/null +if test -d .libs; then + lt_cv_objdir=.libs +else + # MS-DOS does not allow filenames that begin with a dot. + lt_cv_objdir=_libs +fi +rmdir .libs 2>/dev/null +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_objdir" >&5 +printf "%s\n" "$lt_cv_objdir" >&6; } +objdir=$lt_cv_objdir + + + + + +printf "%s\n" "#define LT_OBJDIR \"$lt_cv_objdir/\"" >>confdefs.h + + + + +case $host_os in +aix3*) + # AIX sometimes has problems with the GCC collect2 program. For some + # reason, if we set the COLLECT_NAMES environment variable, the problems + # vanish in a puff of smoke. + if test set != "${COLLECT_NAMES+set}"; then + COLLECT_NAMES= + export COLLECT_NAMES + fi + ;; +esac + +# Global variables: +ofile=libtool +can_build_shared=yes + +# All known linkers require a '.a' archive for static linking (except MSVC and +# ICC, which need '.lib'). +libext=a + +with_gnu_ld=$lt_cv_prog_gnu_ld + +old_CC=$CC +old_CFLAGS=$CFLAGS + +# Set sane defaults for various variables +test -z "$CC" && CC=cc +test -z "$LTCC" && LTCC=$CC +test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS +test -z "$LD" && LD=ld +test -z "$ac_objext" && ac_objext=o + +func_cc_basename $compiler +cc_basename=$func_cc_basename_result + + +# Only perform the check for file, if the check method requires it +test -z "$MAGIC_CMD" && MAGIC_CMD=file +case $deplibs_check_method in +file_magic*) + if test "$file_magic_cmd" = '$MAGIC_CMD'; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for ${ac_tool_prefix}file" >&5 +printf %s "checking for ${ac_tool_prefix}file... " >&6; } +if test ${lt_cv_path_MAGIC_CMD+y} +then : + printf %s "(cached) " >&6 +else $as_nop + case $MAGIC_CMD in +[\\/*] | ?:[\\/]*) + lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path. 
+ ;; +*) + lt_save_MAGIC_CMD=$MAGIC_CMD + lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR + ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" + for ac_dir in $ac_dummy; do + IFS=$lt_save_ifs + test -z "$ac_dir" && ac_dir=. + if test -f "$ac_dir/${ac_tool_prefix}file"; then + lt_cv_path_MAGIC_CMD=$ac_dir/"${ac_tool_prefix}file" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` + MAGIC_CMD=$lt_cv_path_MAGIC_CMD + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : + else + cat <<_LT_EOF 1>&2 + +*** Warning: the command libtool uses to detect shared libraries, +*** $file_magic_cmd, produces output that libtool cannot recognize. +*** The result is that libtool may fail to recognize shared libraries +*** as such. This will affect the creation of libtool libraries that +*** depend on shared libraries, but programs linked with such libtool +*** libraries will work regardless of this problem. Nevertheless, you +*** may want to report the problem to your system manager and/or to +*** bug-libtool@gnu.org + +_LT_EOF + fi ;; + esac + fi + break + fi + done + IFS=$lt_save_ifs + MAGIC_CMD=$lt_save_MAGIC_CMD + ;; +esac +fi + +MAGIC_CMD=$lt_cv_path_MAGIC_CMD +if test -n "$MAGIC_CMD"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 +printf "%s\n" "$MAGIC_CMD" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + + + + +if test -z "$lt_cv_path_MAGIC_CMD"; then + if test -n "$ac_tool_prefix"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for file" >&5 +printf %s "checking for file... " >&6; } +if test ${lt_cv_path_MAGIC_CMD+y} +then : + printf %s "(cached) " >&6 +else $as_nop + case $MAGIC_CMD in +[\\/*] | ?:[\\/]*) + lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path. + ;; +*) + lt_save_MAGIC_CMD=$MAGIC_CMD + lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR + ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" + for ac_dir in $ac_dummy; do + IFS=$lt_save_ifs + test -z "$ac_dir" && ac_dir=. + if test -f "$ac_dir/file"; then + lt_cv_path_MAGIC_CMD=$ac_dir/"file" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` + MAGIC_CMD=$lt_cv_path_MAGIC_CMD + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : + else + cat <<_LT_EOF 1>&2 + +*** Warning: the command libtool uses to detect shared libraries, +*** $file_magic_cmd, produces output that libtool cannot recognize. +*** The result is that libtool may fail to recognize shared libraries +*** as such. This will affect the creation of libtool libraries that +*** depend on shared libraries, but programs linked with such libtool +*** libraries will work regardless of this problem. 
Nevertheless, you +*** may want to report the problem to your system manager and/or to +*** bug-libtool@gnu.org + +_LT_EOF + fi ;; + esac + fi + break + fi + done + IFS=$lt_save_ifs + MAGIC_CMD=$lt_save_MAGIC_CMD + ;; +esac +fi + +MAGIC_CMD=$lt_cv_path_MAGIC_CMD +if test -n "$MAGIC_CMD"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 +printf "%s\n" "$MAGIC_CMD" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + + else + MAGIC_CMD=: + fi +fi + + fi + ;; +esac + +# Use C for the default configuration in the libtool script + +lt_save_CC=$CC +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +# Source file extension for C test sources. +ac_ext=c + +# Object file extension for compiled C test sources. +objext=o +objext=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="int some_variable = 0;" + +# Code to be used in simple link tests +lt_simple_link_test_code='int main(){return(0);}' + + + + + + + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC + +# Save the default compiler, since it gets overwritten when the other +# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. +compiler_DEFAULT=$CC + +# save warnings/boilerplate of simple test code +ac_outfile=conftest.$ac_objext +echo "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$RM conftest* + +ac_outfile=conftest.$ac_objext +echo "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$RM -r conftest* + + +## CAVEAT EMPTOR: +## There is no encapsulation within the following macros, do not change +## the running order or otherwise move them around unless you know exactly +## what you are doing... +if test -n "$compiler"; then + +lt_prog_compiler_no_builtin_flag= + +if test yes = "$GCC"; then + case $cc_basename in + nvcc*) + lt_prog_compiler_no_builtin_flag=' -Xcompiler -fno-builtin' ;; + *) + lt_prog_compiler_no_builtin_flag=' -fno-builtin' ;; + esac + + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 +printf %s "checking if $compiler supports -fno-rtti -fno-exceptions... " >&6; } +if test ${lt_cv_prog_compiler_rtti_exceptions+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_prog_compiler_rtti_exceptions=no + ac_outfile=conftest.$ac_objext + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="-fno-rtti -fno-exceptions" ## exclude from sc_useless_quotes_in_assignment + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. 
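+   # The same compile-and-compare pattern is used for the PIC and -c -o probes
+   # further below: stderr is diffed against the boilerplate captured earlier,
+   # so a compiler that merely warns about an unrecognized flag is not counted
+   # as supporting it.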
+ lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_rtti_exceptions=yes + fi + fi + $RM conftest* + +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_rtti_exceptions" >&5 +printf "%s\n" "$lt_cv_prog_compiler_rtti_exceptions" >&6; } + +if test yes = "$lt_cv_prog_compiler_rtti_exceptions"; then + lt_prog_compiler_no_builtin_flag="$lt_prog_compiler_no_builtin_flag -fno-rtti -fno-exceptions" +else + : +fi + +fi + + + + + + + lt_prog_compiler_wl= +lt_prog_compiler_pic= +lt_prog_compiler_static= + + + if test yes = "$GCC"; then + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_static='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test ia64 = "$host_cpu"; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static='-Bstatic' + fi + lt_prog_compiler_pic='-fPIC' + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + lt_prog_compiler_pic='-fPIC' + ;; + m68k) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the '-m68020' flag to GCC prevents building anything better, + # like '-m68040'. + lt_prog_compiler_pic='-m68020 -resident32 -malways-restore-a4' + ;; + esac + ;; + + beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + + mingw* | cygwin* | pw32* | os2* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + # Although the cygwin gcc ignores -fPIC, still need this for old-style + # (--disable-auto-import) libraries + lt_prog_compiler_pic='-DDLL_EXPORT' + case $host_os in + os2*) + lt_prog_compiler_static='$wl-static' + ;; + esac + ;; + + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + lt_prog_compiler_pic='-fno-common' + ;; + + haiku*) + # PIC is the default for Haiku. + # The "-static" flag exists, but is broken. + lt_prog_compiler_static= + ;; + + hpux*) + # PIC is the default for 64-bit PA HP-UX, but not for 32-bit + # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag + # sets the default TLS model and affects inlining. + case $host_cpu in + hppa*64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic='-fPIC' + ;; + esac + ;; + + interix[3-9]*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + + msdosdjgpp*) + # Just because we use GCC doesn't mean we suddenly get shared libraries + # on systems that don't support them. + lt_prog_compiler_can_build_shared=no + enable_shared=no + ;; + + *nto* | *qnx*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. 
+ lt_prog_compiler_pic='-fPIC -shared' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + lt_prog_compiler_pic=-Kconform_pic + fi + ;; + + *) + lt_prog_compiler_pic='-fPIC' + ;; + esac + + case $cc_basename in + nvcc*) # Cuda Compiler Driver 2.2 + lt_prog_compiler_wl='-Xlinker ' + if test -n "$lt_prog_compiler_pic"; then + lt_prog_compiler_pic="-Xcompiler $lt_prog_compiler_pic" + fi + ;; + esac + else + # PORTME Check for flag to pass linker flags through the system compiler. + case $host_os in + aix*) + lt_prog_compiler_wl='-Wl,' + if test ia64 = "$host_cpu"; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static='-Bstatic' + else + lt_prog_compiler_static='-bnso -bI:/lib/syscalls.exp' + fi + ;; + + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + lt_prog_compiler_pic='-fno-common' + case $cc_basename in + nagfor*) + # NAG Fortran compiler + lt_prog_compiler_wl='-Wl,-Wl,,' + lt_prog_compiler_pic='-PIC' + lt_prog_compiler_static='-Bstatic' + ;; + esac + ;; + + mingw* | cygwin* | pw32* | os2* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + lt_prog_compiler_pic='-DDLL_EXPORT' + case $host_os in + os2*) + lt_prog_compiler_static='$wl-static' + ;; + esac + ;; + + hpux9* | hpux10* | hpux11*) + lt_prog_compiler_wl='-Wl,' + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic='+Z' + ;; + esac + # Is there a better lt_prog_compiler_static that works with the bundled CC? + lt_prog_compiler_static='$wl-a ${wl}archive' + ;; + + irix5* | irix6* | nonstopux*) + lt_prog_compiler_wl='-Wl,' + # PIC (with -KPIC) is the default. + lt_prog_compiler_static='-non_shared' + ;; + + linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + case $cc_basename in + # old Intel for x86_64, which still supported -KPIC. + ecc*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-static' + ;; + # icc used to be incompatible with GCC. + # ICC 10 doesn't accept -KPIC any more. + icc* | ifort*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fPIC' + lt_prog_compiler_static='-static' + ;; + # Lahey Fortran 8.1. + lf95*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='--shared' + lt_prog_compiler_static='--static' + ;; + nagfor*) + # NAG Fortran compiler + lt_prog_compiler_wl='-Wl,-Wl,,' + lt_prog_compiler_pic='-PIC' + lt_prog_compiler_static='-Bstatic' + ;; + tcc*) + # Fabrice Bellard et al's Tiny C Compiler + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fPIC' + lt_prog_compiler_static='-static' + ;; + pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fpic' + lt_prog_compiler_static='-Bstatic' + ;; + ccc*) + lt_prog_compiler_wl='-Wl,' + # All Alpha code is PIC. 
+ lt_prog_compiler_static='-non_shared' + ;; + xl* | bgxl* | bgf* | mpixl*) + # IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-qpic' + lt_prog_compiler_static='-qstaticlink' + ;; + *) + case `$CC -V 2>&1 | $SED 5q` in + *Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [1-7].* | *Sun*Fortran*\ 8.[0-3]*) + # Sun Fortran 8.3 passes all unrecognized flags to the linker + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + lt_prog_compiler_wl='' + ;; + *Sun\ F* | *Sun*Fortran*) + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + lt_prog_compiler_wl='-Qoption ld ' + ;; + *Sun\ C*) + # Sun C 5.9 + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + lt_prog_compiler_wl='-Wl,' + ;; + *Intel*\ [CF]*Compiler*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fPIC' + lt_prog_compiler_static='-static' + ;; + *Portland\ Group*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fpic' + lt_prog_compiler_static='-Bstatic' + ;; + esac + ;; + esac + ;; + + newsos6) + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; + + *nto* | *qnx*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + lt_prog_compiler_pic='-fPIC -shared' + ;; + + osf3* | osf4* | osf5*) + lt_prog_compiler_wl='-Wl,' + # All OSF/1 code is PIC. + lt_prog_compiler_static='-non_shared' + ;; + + rdos*) + lt_prog_compiler_static='-non_shared' + ;; + + solaris*) + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + case $cc_basename in + f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) + lt_prog_compiler_wl='-Qoption ld ';; + *) + lt_prog_compiler_wl='-Wl,';; + esac + ;; + + sunos4*) + lt_prog_compiler_wl='-Qoption ld ' + lt_prog_compiler_pic='-PIC' + lt_prog_compiler_static='-Bstatic' + ;; + + sysv4 | sysv4.2uw2* | sysv4.3*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + lt_prog_compiler_pic='-Kconform_pic' + lt_prog_compiler_static='-Bstatic' + fi + ;; + + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; + + unicos*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_can_build_shared=no + ;; + + uts4*) + lt_prog_compiler_pic='-pic' + lt_prog_compiler_static='-Bstatic' + ;; + + *) + lt_prog_compiler_can_build_shared=no + ;; + esac + fi + +case $host_os in + # For platforms that do not support PIC, -DPIC is meaningless: + *djgpp*) + lt_prog_compiler_pic= + ;; + *) + lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" + ;; +esac + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 +printf %s "checking for $compiler option to produce PIC... " >&6; } +if test ${lt_cv_prog_compiler_pic+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_prog_compiler_pic=$lt_prog_compiler_pic +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5 +printf "%s\n" "$lt_cv_prog_compiler_pic" >&6; } +lt_prog_compiler_pic=$lt_cv_prog_compiler_pic + +# +# Check to make sure the PIC flag actually works. +# +if test -n "$lt_prog_compiler_pic"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5 +printf %s "checking if $compiler PIC flag $lt_prog_compiler_pic works... 
" >&6; } +if test ${lt_cv_prog_compiler_pic_works+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_prog_compiler_pic_works=no + ac_outfile=conftest.$ac_objext + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$lt_prog_compiler_pic -DPIC" ## exclude from sc_useless_quotes_in_assignment + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_pic_works=yes + fi + fi + $RM conftest* + +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works" >&5 +printf "%s\n" "$lt_cv_prog_compiler_pic_works" >&6; } + +if test yes = "$lt_cv_prog_compiler_pic_works"; then + case $lt_prog_compiler_pic in + "" | " "*) ;; + *) lt_prog_compiler_pic=" $lt_prog_compiler_pic" ;; + esac +else + lt_prog_compiler_pic= + lt_prog_compiler_can_build_shared=no +fi + +fi + + + + + + + + + + + +# +# Check to make sure the static flag actually works. +# +wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\" +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 +printf %s "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } +if test ${lt_cv_prog_compiler_static_works+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_prog_compiler_static_works=no + save_LDFLAGS=$LDFLAGS + LDFLAGS="$LDFLAGS $lt_tmp_static_flag" + echo "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. + cat conftest.err 1>&5 + $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_static_works=yes + fi + else + lt_cv_prog_compiler_static_works=yes + fi + fi + $RM -r conftest* + LDFLAGS=$save_LDFLAGS + +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works" >&5 +printf "%s\n" "$lt_cv_prog_compiler_static_works" >&6; } + +if test yes = "$lt_cv_prog_compiler_static_works"; then + : +else + lt_prog_compiler_static= +fi + + + + + + + + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 +printf %s "checking if $compiler supports -c -o file.$ac_objext... 
" >&6; } +if test ${lt_cv_prog_compiler_c_o+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_prog_compiler_c_o=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o=yes + fi + fi + chmod u+w . 2>&5 + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. + $RM -r conftest + $RM conftest* + +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 +printf "%s\n" "$lt_cv_prog_compiler_c_o" >&6; } + + + + + + + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 +printf %s "checking if $compiler supports -c -o file.$ac_objext... " >&6; } +if test ${lt_cv_prog_compiler_c_o+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_prog_compiler_c_o=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o=yes + fi + fi + chmod u+w . 
2>&5 + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. + $RM -r conftest + $RM conftest* + +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 +printf "%s\n" "$lt_cv_prog_compiler_c_o" >&6; } + + + + +hard_links=nottested +if test no = "$lt_cv_prog_compiler_c_o" && test no != "$need_locks"; then + # do not overwrite the value of need_locks provided by the user + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 +printf %s "checking if we can lock with hard links... " >&6; } + hard_links=yes + $RM conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 +printf "%s\n" "$hard_links" >&6; } + if test no = "$hard_links"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&5 +printf "%s\n" "$as_me: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&2;} + need_locks=warn + fi +else + need_locks=no +fi + + + + + + + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +printf %s "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } + + runpath_var= + allow_undefined_flag= + always_export_symbols=no + archive_cmds= + archive_expsym_cmds= + compiler_needs_object=no + enable_shared_with_static_runtimes=no + export_dynamic_flag_spec= + export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + hardcode_automatic=no + hardcode_direct=no + hardcode_direct_absolute=no + hardcode_libdir_flag_spec= + hardcode_libdir_separator= + hardcode_minus_L=no + hardcode_shlibpath_var=unsupported + inherit_rpath=no + link_all_deplibs=unknown + module_cmds= + module_expsym_cmds= + old_archive_from_new_cmds= + old_archive_from_expsyms_cmds= + thread_safe_flag_spec= + whole_archive_flag_spec= + # include_expsyms should be a list of space-separated symbols to be *always* + # included in the symbol list + include_expsyms= + # exclude_expsyms can be an extended regexp of symbols to exclude + # it will be wrapped by ' (' and ')$', so one must not match beginning or + # end of line. Example: 'a|bc|.*d.*' will exclude the symbols 'a' and 'bc', + # as well as any symbol that contains 'd'. + exclude_expsyms='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' + # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out + # platforms (ab)use it in PIC code, but their linkers get confused if + # the symbol is explicitly referenced. Since portable code cannot + # rely on this symbol name, it's probably fine to never include it in + # preloaded symbol tables. + # Exclude shared library initialization/finalization symbols. + extract_expsyms_cmds= + + case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + # FIXME: the MSVC++ and ICC port hasn't been tested in a loooong time + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++ or Intel C++ Compiler. 
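+    # Forcing with_gnu_ld=no here routes configuration onto the native-linker
+    # branch below, which knows about .lib import libraries and .dll naming.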
+ if test yes != "$GCC"; then + with_gnu_ld=no + fi + ;; + interix*) + # we just hope/assume this is gcc and not c89 (= MSVC++ or ICC) + with_gnu_ld=yes + ;; + openbsd* | bitrig*) + with_gnu_ld=no + ;; + esac + + ld_shlibs=yes + + # On some targets, GNU ld is compatible enough with the native linker + # that we're better off using the native interface for both. + lt_use_gnu_ld_interface=no + if test yes = "$with_gnu_ld"; then + case $host_os in + aix*) + # The AIX port of GNU ld has always aspired to compatibility + # with the native linker. However, as the warning in the GNU ld + # block says, versions before 2.19.5* couldn't really create working + # shared libraries, regardless of the interface used. + case `$LD -v 2>&1` in + *\ \(GNU\ Binutils\)\ 2.19.5*) ;; + *\ \(GNU\ Binutils\)\ 2.[2-9]*) ;; + *\ \(GNU\ Binutils\)\ [3-9]*) ;; + *) + lt_use_gnu_ld_interface=yes + ;; + esac + ;; + *) + lt_use_gnu_ld_interface=yes + ;; + esac + fi + + if test yes = "$lt_use_gnu_ld_interface"; then + # If archive_cmds runs LD, not CC, wlarc should be empty + wlarc='$wl' + + # Set some defaults for GNU ld with shared library support. These + # are reset later if shared libraries are not supported. Putting them + # here allows them to be overridden if necessary. + runpath_var=LD_RUN_PATH + hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' + export_dynamic_flag_spec='$wl--export-dynamic' + # ancient GNU ld didn't support --whole-archive et. al. + if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then + whole_archive_flag_spec=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' + else + whole_archive_flag_spec= + fi + supports_anon_versioning=no + case `$LD -v | $SED -e 's/([^)]\+)\s\+//' 2>&1` in + *GNU\ gold*) supports_anon_versioning=yes ;; + *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + + # See if GNU ld supports shared libraries. + case $host_os in + aix[3-9]*) + # On AIX/PPC, the GNU linker is very broken + if test ia64 != "$host_cpu"; then + ld_shlibs=no + cat <<_LT_EOF 1>&2 + +*** Warning: the GNU linker, at least up to release 2.19, is reported +*** to be unable to reliably create shared libraries on AIX. +*** Therefore, libtool is disabling shared libraries support. If you +*** really care for shared libraries, you may want to install binutils +*** 2.20 or above, or modify your PATH so that a non-GNU linker is found. +*** You will then need to restart the configuration process. 
+ +_LT_EOF + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds='' + ;; + m68k) + archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + ;; + esac + ;; + + beos*) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + allow_undefined_flag=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + else + ld_shlibs=no + fi + ;; + + cygwin* | mingw* | pw32* | cegcc*) + # _LT_TAGVAR(hardcode_libdir_flag_spec, ) is actually meaningless, + # as there is no search path for DLLs. + hardcode_libdir_flag_spec='-L$libdir' + export_dynamic_flag_spec='$wl--export-all-symbols' + allow_undefined_flag=unsupported + always_export_symbols=no + enable_shared_with_static_runtimes=yes + export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' + exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file, use it as + # is; otherwise, prepend EXPORTS... 
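+        # The $SED probe in the command below treats the file as a ready-made
+        # .def when its first non-blank, non-comment line is an EXPORTS or
+        # LIBRARY statement.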
+ archive_expsym_cmds='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + ld_shlibs=no + fi + ;; + + haiku*) + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + link_all_deplibs=yes + ;; + + os2*) + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + allow_undefined_flag=unsupported + shrext_cmds=.dll + archive_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ + $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ + $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ + $ECHO EXPORTS >> $output_objdir/$libname.def~ + emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ + $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ + emximp -o $lib $output_objdir/$libname.def' + archive_expsym_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ + $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ + $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ + $ECHO EXPORTS >> $output_objdir/$libname.def~ + prefix_cmds="$SED"~ + if test EXPORTS = "`$SED 1q $export_symbols`"; then + prefix_cmds="$prefix_cmds -e 1d"; + fi~ + prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ + cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ + $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ + emximp -o $lib $output_objdir/$libname.def' + old_archive_From_new_cmds='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' + enable_shared_with_static_runtimes=yes + file_list_spec='@' + ;; + + interix[3-9]*) + hardcode_direct=no + hardcode_shlibpath_var=no + hardcode_libdir_flag_spec='$wl-rpath,$libdir' + export_dynamic_flag_spec='$wl-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
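+      # The expr in the commands below derives that base from $RANDOM (or the
+      # PID when RANDOM is unset): 0x50000000 plus a multiple of 256 KiB, so
+      # the result stays within 0x50000000-0x6FFC0000.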
+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + archive_expsym_cmds='$SED "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + + gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) + tmp_diet=no + if test linux-dietlibc = "$host_os"; then + case $cc_basename in + diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) + esac + fi + if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ + && test no = "$tmp_diet" + then + tmp_addflag=' $pic_flag' + tmp_sharedflag='-shared' + case $cc_basename,$host_cpu in + pgcc*) # Portland Group C compiler + whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' + tmp_addflag=' $pic_flag' + ;; + pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group f77 and f90 compilers + whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' + tmp_addflag=' $pic_flag -Mnomain' ;; + ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 + tmp_addflag=' -i_dynamic' ;; + efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 + tmp_addflag=' -i_dynamic -nofor_main' ;; + ifc* | ifort*) # Intel Fortran compiler + tmp_addflag=' -nofor_main' ;; + lf95*) # Lahey Fortran 8.1 + whole_archive_flag_spec= + tmp_sharedflag='--shared' ;; + nagfor*) # NAGFOR 5.3 + tmp_sharedflag='-Wl,-shared' ;; + xl[cC]* | bgxl[cC]* | mpixl[cC]*) # IBM XL C 8.0 on PPC (deal with xlf below) + tmp_sharedflag='-qmkshrobj' + tmp_addflag= ;; + nvcc*) # Cuda Compiler Driver 2.2 + whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' + compiler_needs_object=yes + ;; + esac + case `$CC -V 2>&1 | $SED 5q` in + *Sun\ C*) # Sun C 5.9 + whole_archive_flag_spec='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' + compiler_needs_object=yes + tmp_sharedflag='-G' ;; + *Sun\ F*) # Sun Fortran 8.3 + tmp_sharedflag='-G' ;; + esac + archive_cmds='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + + if test yes = "$supports_anon_versioning"; then + archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib' + fi + + case $cc_basename in + tcc*) + export_dynamic_flag_spec='-rdynamic' + ;; + xlf* | bgf* | bgxlf* | mpixlf*) + # IBM XL Fortran 10.1 on PPC cannot create shared libs itself + whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' + hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' + 
archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' + if test yes = "$supports_anon_versioning"; then + archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' + fi + ;; + esac + else + ld_shlibs=no + fi + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + + solaris*) + if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then + ld_shlibs=no + cat <<_LT_EOF 1>&2 + +*** Warning: The releases 2.8.* of the GNU linker cannot reliably +*** create shared libraries on Solaris systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.9.1 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. + +_LT_EOF + elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + + sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) + case `$LD -v 2>&1` in + *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) + ld_shlibs=no + cat <<_LT_EOF 1>&2 + +*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 cannot +*** reliably create shared libraries on SCO systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.16.91.0.3 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. + +_LT_EOF + ;; + *) + # For security reasons, it is highly recommended that you always + # use absolute paths for naming shared libraries, and exclude the + # DT_RUNPATH tag from executables and libraries. But doing so + # requires that you compile everything twice, which is a pain. 
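+	# Only GNU lds that report ELF among their supported targets get shared
+	# library support here; anything else disables it.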
+ if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + esac + ;; + + sunos4*) + archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' + wlarc= + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + *) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + esac + + if test no = "$ld_shlibs"; then + runpath_var= + hardcode_libdir_flag_spec= + export_dynamic_flag_spec= + whole_archive_flag_spec= + fi + else + # PORTME fill in a description of your system's linker (not GNU ld) + case $host_os in + aix3*) + allow_undefined_flag=unsupported + always_export_symbols=yes + archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' + # Note: this linker hardcodes the directories in LIBPATH if there + # are no directories specified by -L. + hardcode_minus_L=yes + if test yes = "$GCC" && test -z "$lt_prog_compiler_static"; then + # Neither direct hardcoding nor static linking is supported with a + # broken collect2. + hardcode_direct=unsupported + fi + ;; + + aix[4-9]*) + if test ia64 = "$host_cpu"; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag= + else + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to GNU nm, but means don't demangle to AIX nm. + # Without the "-l" option, or with the "-B" option, AIX nm treats + # weak defined symbols like other global defined symbols, whereas + # GNU nm marks them as "W". + # While the 'weak' keyword is ignored in the Export File, we need + # it in the Import File for the 'aix-soname' feature, so we have + # to replace the "-B" option with "-P" for AIX nm. + if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then + export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols' + else + export_symbols_cmds='`func_echo_all $NM | $SED -e '\''s/B\([^B]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "L") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && (substr(\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols' + fi + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # have runtime linking enabled, and use it for executables. 
+ # For shared libraries, we enable/disable runtime linking + # depending on the kind of the shared library created - + # when "with_aix_soname,aix_use_runtimelinking" is: + # "aix,no" lib.a(lib.so.V) shared, rtl:no, for executables + # "aix,yes" lib.so shared, rtl:yes, for executables + # lib.a static archive + # "both,no" lib.so.V(shr.o) shared, rtl:yes + # lib.a(lib.so.V) shared, rtl:no, for executables + # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables + # lib.a(lib.so.V) shared, rtl:no + # "svr4,*" lib.so.V(shr.o) shared, rtl:yes, for executables + # lib.a static archive + case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) + for ld_flag in $LDFLAGS; do + if (test x-brtl = "x$ld_flag" || test x-Wl,-brtl = "x$ld_flag"); then + aix_use_runtimelinking=yes + break + fi + done + if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then + # With aix-soname=svr4, we create the lib.so.V shared archives only, + # so we don't have lib.a shared libs to link our executables. + # We have to force runtime linking in this case. + aix_use_runtimelinking=yes + LDFLAGS="$LDFLAGS -Wl,-brtl" + fi + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + archive_cmds='' + hardcode_direct=yes + hardcode_direct_absolute=yes + hardcode_libdir_separator=':' + link_all_deplibs=yes + file_list_spec='$wl-f,' + case $with_aix_soname,$aix_use_runtimelinking in + aix,*) ;; # traditional, no import file + svr4,* | *,yes) # use import file + # The Import File defines what to hardcode. + hardcode_direct=no + hardcode_direct_absolute=no + ;; + esac + + if test yes = "$GCC"; then + case $host_os in aix4.[012]|aix4.[012].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`$CC -print-prog-name=collect2` + if test -f "$collect2name" && + strings "$collect2name" | $GREP resolve_lib_name >/dev/null + then + # We have reworked collect2 + : + else + # We have old collect2 + hardcode_direct=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + hardcode_minus_L=yes + hardcode_libdir_flag_spec='-L$libdir' + hardcode_libdir_separator= + fi + ;; + esac + shared_flag='-shared' + if test yes = "$aix_use_runtimelinking"; then + shared_flag="$shared_flag "'$wl-G' + fi + # Need to ensure runtime linking is disabled for the traditional + # shared library, or the linker may eventually find shared libraries + # /with/ Import File - we do not want to mix them. + shared_flag_aix='-shared' + shared_flag_svr4='-shared $wl-G' + else + # not using gcc + if test ia64 = "$host_cpu"; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. 
The following line is correct: + shared_flag='-G' + else + if test yes = "$aix_use_runtimelinking"; then + shared_flag='$wl-G' + else + shared_flag='$wl-bM:SRE' + fi + shared_flag_aix='$wl-bM:SRE' + shared_flag_svr4='$wl-G' + fi + fi + + export_dynamic_flag_spec='$wl-bexpall' + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. + always_export_symbols=yes + if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + allow_undefined_flag='-berok' + # Determine the default libpath from the value encoded in an + # empty executable. + if test set = "${lt_cv_aix_libpath+set}"; then + aix_libpath=$lt_cv_aix_libpath +else + if test ${lt_cv_aix_libpath_+y} +then : + printf %s "(cached) " >&6 +else $as_nop + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main (void) +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO" +then : + + lt_aix_libpath_sed=' + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\([^ ]*\) *$/\1/ + p + } + }' + lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + # Check for a 64-bit object if we didn't find anything. + if test -z "$lt_cv_aix_libpath_"; then + lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam \ + conftest$ac_exeext conftest.$ac_ext + if test -z "$lt_cv_aix_libpath_"; then + lt_cv_aix_libpath_=/usr/lib:/lib + fi + +fi + + aix_libpath=$lt_cv_aix_libpath_ +fi + + hardcode_libdir_flag_spec='$wl-blibpath:$libdir:'"$aix_libpath" + archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag + else + if test ia64 = "$host_cpu"; then + hardcode_libdir_flag_spec='$wl-R $libdir:/usr/lib:/lib' + allow_undefined_flag="-z nodefs" + archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an + # empty executable. + if test set = "${lt_cv_aix_libpath+set}"; then + aix_libpath=$lt_cv_aix_libpath +else + if test ${lt_cv_aix_libpath_+y} +then : + printf %s "(cached) " >&6 +else $as_nop + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main (void) +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO" +then : + + lt_aix_libpath_sed=' + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\([^ ]*\) *$/\1/ + p + } + }' + lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + # Check for a 64-bit object if we didn't find anything. 
+ if test -z "$lt_cv_aix_libpath_"; then + lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam \ + conftest$ac_exeext conftest.$ac_ext + if test -z "$lt_cv_aix_libpath_"; then + lt_cv_aix_libpath_=/usr/lib:/lib + fi + +fi + + aix_libpath=$lt_cv_aix_libpath_ +fi + + hardcode_libdir_flag_spec='$wl-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + no_undefined_flag=' $wl-bernotok' + allow_undefined_flag=' $wl-berok' + if test yes = "$with_gnu_ld"; then + # We only use this code for GNU lds that support --whole-archive. + whole_archive_flag_spec='$wl--whole-archive$convenience $wl--no-whole-archive' + else + # Exported symbols can be pulled into shared objects from archives + whole_archive_flag_spec='$convenience' + fi + archive_cmds_need_lc=yes + archive_expsym_cmds='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d' + # -brtl affects multiple linker settings, -berok does not and is overridden later + compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([, ]\\)%-berok\\1%g"`' + if test svr4 != "$with_aix_soname"; then + # This is similar to how AIX traditionally builds its shared libraries. + archive_expsym_cmds="$archive_expsym_cmds"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname' + fi + if test aix != "$with_aix_soname"; then + archive_expsym_cmds="$archive_expsym_cmds"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! 
$soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp' + else + # used by -dlpreopen to get the symbols + archive_expsym_cmds="$archive_expsym_cmds"'~$MV $output_objdir/$realname.d/$soname $output_objdir' + fi + archive_expsym_cmds="$archive_expsym_cmds"'~$RM -r $output_objdir/$realname.d' + fi + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds='' + ;; + m68k) + archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + ;; + esac + ;; + + bsdi[45]*) + export_dynamic_flag_spec=-rdynamic + ;; + + cygwin* | mingw* | pw32* | cegcc*) + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++ or Intel C++ Compiler. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + case $cc_basename in + cl* | icl*) + # Native MSVC or ICC + hardcode_libdir_flag_spec=' ' + allow_undefined_flag=unsupported + always_export_symbols=yes + file_list_spec='@' + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=.dll + # FIXME: Setting linknames here is a bad hack. + archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames=' + archive_expsym_cmds='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then + cp "$export_symbols" "$output_objdir/$soname.def"; + echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp"; + else + $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp; + fi~ + $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ + linknames=' + # The linker will not automatically build a static lib if we build a DLL. 
+ # _LT_TAGVAR(old_archive_from_new_cmds, )='true' + enable_shared_with_static_runtimes=yes + exclude_expsyms='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' + export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' + # Don't use ranlib + old_postinstall_cmds='chmod 644 $oldlib' + postlink_cmds='lt_outputfile="@OUTPUT@"~ + lt_tool_outputfile="@TOOL_OUTPUT@"~ + case $lt_outputfile in + *.exe|*.EXE) ;; + *) + lt_outputfile=$lt_outputfile.exe + lt_tool_outputfile=$lt_tool_outputfile.exe + ;; + esac~ + if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then + $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; + $RM "$lt_outputfile.manifest"; + fi' + ;; + *) + # Assume MSVC and ICC wrapper + hardcode_libdir_flag_spec=' ' + allow_undefined_flag=unsupported + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=.dll + # FIXME: Setting linknames here is a bad hack. + archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' + # The linker will automatically build a .lib file if we build a DLL. + old_archive_from_new_cmds='true' + # FIXME: Should let the user specify the lib program. + old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' + enable_shared_with_static_runtimes=yes + ;; + esac + ;; + + darwin* | rhapsody*) + + + archive_cmds_need_lc=no + hardcode_direct=no + hardcode_automatic=yes + hardcode_shlibpath_var=unsupported + if test yes = "$lt_cv_ld_force_load"; then + whole_archive_flag_spec='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' + + else + whole_archive_flag_spec='' + fi + link_all_deplibs=yes + allow_undefined_flag=$_lt_dar_allow_undefined + case $cc_basename in + ifort*|nagfor*) _lt_dar_can_shared=yes ;; + *) _lt_dar_can_shared=$GCC ;; + esac + if test yes = "$_lt_dar_can_shared"; then + output_verbose_link_cmd=func_echo_all + archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil" + module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil" + archive_expsym_cmds="$SED 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil" + module_expsym_cmds="$SED -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dar_export_syms$_lt_dsymutil" + + else + ld_shlibs=no + fi + + ;; + + dgux*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_shlibpath_var=no + ;; + + # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor + # support. Future versions do this automatically, but an explicit c++rt0.o + # does not break anything, and helps significantly (at the cost of a little + # extra space). 
+ freebsd2.2*) + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + # Unfortunately, older versions of FreeBSD 2 do not have this feature. + freebsd2.*) + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes + hardcode_minus_L=yes + hardcode_shlibpath_var=no + ;; + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. + freebsd* | dragonfly* | midnightbsd*) + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + hpux9*) + if test yes = "$GCC"; then + archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' + else + archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' + fi + hardcode_libdir_flag_spec='$wl+b $wl$libdir' + hardcode_libdir_separator=: + hardcode_direct=yes + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L=yes + export_dynamic_flag_spec='$wl-E' + ;; + + hpux10*) + if test yes,no = "$GCC,$with_gnu_ld"; then + archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi + if test no = "$with_gnu_ld"; then + hardcode_libdir_flag_spec='$wl+b $wl$libdir' + hardcode_libdir_separator=: + hardcode_direct=yes + hardcode_direct_absolute=yes + export_dynamic_flag_spec='$wl-E' + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L=yes + fi + ;; + + hpux11*) + if test yes,no = "$GCC,$with_gnu_ld"; then + case $host_cpu in + hppa*64*) + archive_cmds='$CC -shared $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else + case $host_cpu in + hppa*64*) + archive_cmds='$CC -b $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + archive_cmds='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + + # Older versions of the 11.00 compiler do not understand -b yet + # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does) + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $CC understands -b" >&5 +printf %s "checking if $CC understands -b... " >&6; } +if test ${lt_cv_prog_compiler__b+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_prog_compiler__b=no + save_LDFLAGS=$LDFLAGS + LDFLAGS="$LDFLAGS -b" + echo "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. 
+ cat conftest.err 1>&5 + $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler__b=yes + fi + else + lt_cv_prog_compiler__b=yes + fi + fi + $RM -r conftest* + LDFLAGS=$save_LDFLAGS + +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler__b" >&5 +printf "%s\n" "$lt_cv_prog_compiler__b" >&6; } + +if test yes = "$lt_cv_prog_compiler__b"; then + archive_cmds='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' +else + archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' +fi + + ;; + esac + fi + if test no = "$with_gnu_ld"; then + hardcode_libdir_flag_spec='$wl+b $wl$libdir' + hardcode_libdir_separator=: + + case $host_cpu in + hppa*64*|ia64*) + hardcode_direct=no + hardcode_shlibpath_var=no + ;; + *) + hardcode_direct=yes + hardcode_direct_absolute=yes + export_dynamic_flag_spec='$wl-E' + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L=yes + ;; + esac + fi + ;; + + irix5* | irix6* | nonstopux*) + if test yes = "$GCC"; then + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' + # Try to use the -exported_symbol ld option, if it does not + # work, assume that -exports_file does not work either and + # implicitly export all symbols. + # This should be the same for all languages, so no per-tag cache variable. + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5 +printf %s "checking whether the $host_os linker accepts -exported_symbol... " >&6; } +if test ${lt_cv_irix_exported_symbol+y} +then : + printf %s "(cached) " >&6 +else $as_nop + save_LDFLAGS=$LDFLAGS + LDFLAGS="$LDFLAGS -shared $wl-exported_symbol ${wl}foo $wl-update_registry $wl/dev/null" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +int foo (void) { return 0; } +_ACEOF +if ac_fn_c_try_link "$LINENO" +then : + lt_cv_irix_exported_symbol=yes +else $as_nop + lt_cv_irix_exported_symbol=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS=$save_LDFLAGS +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5 +printf "%s\n" "$lt_cv_irix_exported_symbol" >&6; } + if test yes = "$lt_cv_irix_exported_symbol"; then + archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations $wl-exports_file $wl$export_symbols -o $lib' + fi + else + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -exports_file $export_symbols -o $lib' + fi + archive_cmds_need_lc='no' + hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' + hardcode_libdir_separator=: + inherit_rpath=yes + link_all_deplibs=yes + ;; + + linux*) + case $cc_basename in + tcc*) + # Fabrice Bellard et al's Tiny C Compiler + ld_shlibs=yes + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out + else + archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF + fi + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + newsos6) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes + hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' + hardcode_libdir_separator=: + hardcode_shlibpath_var=no + ;; + + *nto* | *qnx*) + ;; + + openbsd* | bitrig*) + if test -f /usr/libexec/ld.so; then + hardcode_direct=yes + hardcode_shlibpath_var=no + hardcode_direct_absolute=yes + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags $wl-retain-symbols-file,$export_symbols' + hardcode_libdir_flag_spec='$wl-rpath,$libdir' + export_dynamic_flag_spec='$wl-E' + else + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec='$wl-rpath,$libdir' + fi + else + ld_shlibs=no + fi + ;; + + os2*) + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + allow_undefined_flag=unsupported + shrext_cmds=.dll + archive_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ + $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ + $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ + $ECHO EXPORTS >> $output_objdir/$libname.def~ + emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ + $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ + emximp -o $lib $output_objdir/$libname.def' + archive_expsym_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" 
> $output_objdir/$libname.def~ + $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ + $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ + $ECHO EXPORTS >> $output_objdir/$libname.def~ + prefix_cmds="$SED"~ + if test EXPORTS = "`$SED 1q $export_symbols`"; then + prefix_cmds="$prefix_cmds -e 1d"; + fi~ + prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ + cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ + $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ + emximp -o $lib $output_objdir/$libname.def' + old_archive_From_new_cmds='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' + enable_shared_with_static_runtimes=yes + file_list_spec='@' + ;; + + osf3*) + if test yes = "$GCC"; then + allow_undefined_flag=' $wl-expect_unresolved $wl\*' + archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' + else + allow_undefined_flag=' -expect_unresolved \*' + archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' + fi + archive_cmds_need_lc='no' + hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' + hardcode_libdir_separator=: + ;; + + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test yes = "$GCC"; then + allow_undefined_flag=' $wl-expect_unresolved $wl\*' + archive_cmds='$CC -shared$allow_undefined_flag $pic_flag $libobjs $deplibs $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' + hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' + else + allow_undefined_flag=' -expect_unresolved \*' + archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' + archive_expsym_cmds='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ + $CC -shared$allow_undefined_flag $wl-input $wl$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~$RM $lib.exp' + + # Both c and cxx compiler support -rpath directly + hardcode_libdir_flag_spec='-rpath $libdir' + fi + archive_cmds_need_lc='no' + hardcode_libdir_separator=: + ;; + + solaris*) + no_undefined_flag=' -z defs' + if test yes = "$GCC"; then + wlarc='$wl' + archive_cmds='$CC -shared $pic_flag $wl-z ${wl}text $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -shared $pic_flag $wl-z ${wl}text $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + else + case `$CC -V 2>&1` in + *"Compilers 5.0"*) + wlarc='' + archive_cmds='$LD -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $linker_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> 
$lib.exp~ + $LD -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' + ;; + *) + wlarc='$wl' + archive_cmds='$CC -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + ;; + esac + fi + hardcode_libdir_flag_spec='-R$libdir' + hardcode_shlibpath_var=no + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + # The compiler driver will combine and reorder linker options, + # but understands '-z linker_flag'. GCC discards it without '$wl', + # but is careful enough not to reorder. + # Supported since Solaris 2.6 (maybe 2.5.1?) + if test yes = "$GCC"; then + whole_archive_flag_spec='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract' + else + whole_archive_flag_spec='-z allextract$convenience -z defaultextract' + fi + ;; + esac + link_all_deplibs=yes + ;; + + sunos4*) + if test sequent = "$host_vendor"; then + # Use $CC to link under sequent, because it throws in some extra .o + # files that make .init and .fini sections work. + archive_cmds='$CC -G $wl-h $soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' + fi + hardcode_libdir_flag_spec='-L$libdir' + hardcode_direct=yes + hardcode_minus_L=yes + hardcode_shlibpath_var=no + ;; + + sysv4) + case $host_vendor in + sni) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes # is this really true??? + ;; + siemens) + ## LD is ld it makes a PLAMLIB + ## CC just makes a GrossModule. + archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags' + reload_cmds='$CC -r -o $output$reload_objs' + hardcode_direct=no + ;; + motorola) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=no #Motorola manual says yes, but my tests say they lie + ;; + esac + runpath_var='LD_RUN_PATH' + hardcode_shlibpath_var=no + ;; + + sysv4.3*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var=no + export_dynamic_flag_spec='-Bexport' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var=no + runpath_var=LD_RUN_PATH + hardcode_runpath_var=yes + ld_shlibs=yes + fi + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) + no_undefined_flag='$wl-z,text' + archive_cmds_need_lc=no + hardcode_shlibpath_var=no + runpath_var='LD_RUN_PATH' + + if test yes = "$GCC"; then + archive_cmds='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + sysv5* | sco3.2v5* | sco5v6*) + # Note: We CANNOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. 
If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. + no_undefined_flag='$wl-z,text' + allow_undefined_flag='$wl-z,nodefs' + archive_cmds_need_lc=no + hardcode_shlibpath_var=no + hardcode_libdir_flag_spec='$wl-R,$libdir' + hardcode_libdir_separator=':' + link_all_deplibs=yes + export_dynamic_flag_spec='$wl-Bexport' + runpath_var='LD_RUN_PATH' + + if test yes = "$GCC"; then + archive_cmds='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + uts4*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_shlibpath_var=no + ;; + + *) + ld_shlibs=no + ;; + esac + + if test sni = "$host_vendor"; then + case $host in + sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) + export_dynamic_flag_spec='$wl-Blargedynsym' + ;; + esac + fi + fi + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs" >&5 +printf "%s\n" "$ld_shlibs" >&6; } +test no = "$ld_shlibs" && can_build_shared=no + +with_gnu_ld=$with_gnu_ld + + + + + + + + + + + + + + + +# +# Do we need to explicitly link libc? +# +case "x$archive_cmds_need_lc" in +x|xyes) + # Assume -lc should be added + archive_cmds_need_lc=yes + + if test yes,yes = "$GCC,$enable_shared"; then + case $archive_cmds in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 +printf %s "checking whether -lc should be explicitly linked in... " >&6; } +if test ${lt_cv_archive_cmds_need_lc+y} +then : + printf %s "(cached) " >&6 +else $as_nop + $RM conftest* + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$lt_prog_compiler_wl + pic_flag=$lt_prog_compiler_pic + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. + libname=conftest + lt_save_allow_undefined_flag=$allow_undefined_flag + allow_undefined_flag= + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5 + (eval $archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; } + then + lt_cv_archive_cmds_need_lc=no + else + lt_cv_archive_cmds_need_lc=yes + fi + allow_undefined_flag=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $RM conftest* + +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc" >&5 +printf "%s\n" "$lt_cv_archive_cmds_need_lc" >&6; } + archive_cmds_need_lc=$lt_cv_archive_cmds_need_lc + ;; + esac + fi + ;; +esac + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 +printf %s "checking dynamic linker characteristics... " >&6; } + +if test yes = "$GCC"; then + case $host_os in + darwin*) lt_awk_arg='/^libraries:/,/LR/' ;; + *) lt_awk_arg='/^libraries:/' ;; + esac + case $host_os in + mingw* | cegcc*) lt_sed_strip_eq='s|=\([A-Za-z]:\)|\1|g' ;; + *) lt_sed_strip_eq='s|=/|/|g' ;; + esac + lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq` + case $lt_search_path_spec in + *\;*) + # if the path contains ";" then we assume it to be the separator + # otherwise default to the standard path separator (i.e. ":") - it is + # assumed that no part of a normal pathname contains ";" but that should + # okay in the real world where ";" in dirpaths is itself problematic. + lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'` + ;; + *) + lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"` + ;; + esac + # Ok, now we have the path, separated by spaces, we can step through it + # and add multilib dir if necessary... + lt_tmp_lt_search_path_spec= + lt_multi_os_dir=/`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` + # ...but if some path component already ends with the multilib dir we assume + # that all is fine and trust -print-search-dirs as is (GCC 4.2? or newer). + case "$lt_multi_os_dir; $lt_search_path_spec " in + "/; "* | "/.; "* | "/./; "* | *"$lt_multi_os_dir "* | *"$lt_multi_os_dir/ "*) + lt_multi_os_dir= + ;; + esac + for lt_sys_path in $lt_search_path_spec; do + if test -d "$lt_sys_path$lt_multi_os_dir"; then + lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path$lt_multi_os_dir" + elif test -n "$lt_multi_os_dir"; then + test -d "$lt_sys_path" && \ + lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" + fi + done + lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk ' +BEGIN {RS = " "; FS = "/|\n";} { + lt_foo = ""; + lt_count = 0; + for (lt_i = NF; lt_i > 0; lt_i--) { + if ($lt_i != "" && $lt_i != ".") { + if ($lt_i == "..") { + lt_count++; + } else { + if (lt_count == 0) { + lt_foo = "/" $lt_i lt_foo; + } else { + lt_count--; + } + } + } + } + if (lt_foo != "") { lt_freq[lt_foo]++; } + if (lt_freq[lt_foo] == 1) { print lt_foo; } +}'` + # AWK program above erroneously prepends '/' to C:/dos/paths + # for these hosts. 
+ case $host_os in + mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\ + $SED 's|/\([A-Za-z]:\)|\1|g'` ;; + esac + sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP` +else + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +fi +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=.so +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + + + +case $host_os in +aix3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$release$shared_ext$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. + soname_spec='$libname$release$shared_ext$major' + ;; + +aix[4-9]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test ia64 = "$host_cpu"; then + # AIX 5 supports IA64 + library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line '#! .'. This would cause the generated library to + # depend on '.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. + case $host_os in + aix4 | aix4.[01] | aix4.[01].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # Using Import Files as archive members, it is possible to support + # filename-based versioning of shared library archives on AIX. While + # this would work for both with and without runtime linking, it will + # prevent static linking of such archives. So we do filename-based + # shared library versioning with .so extension only, which is used + # when both runtime linking and shared linking is enabled. + # Unfortunately, runtime linking may impact performance, so we do + # not want this to be the default eventually. Also, we use the + # versioned .so libs for executables only if there is the -brtl + # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only. + # To allow for filename-based versioning support, we need to create + # libNAME.so.V as an archive file, containing: + # *) an Import File, referring to the versioned filename of the + # archive as well as the shared archive member, telling the + # bitwidth (32 or 64) of that shared object, and providing the + # list of exported symbols of that shared object, eventually + # decorated with the 'weak' keyword + # *) the shared object with the F_LOADONLY flag set, to really avoid + # it being seen by the linker. + # At run time we better use the real file rather than another symlink, + # but for link time we create the symlink libNAME.so -> libNAME.so.V + + case $with_aix_soname,$aix_use_runtimelinking in + # AIX (on Power*) has no versioning support, so currently we cannot hardcode correct + # soname into executable. 
Probably we can add versioning support to + # collect2, so additional links can be useful in future. + aix,yes) # traditional libtool + dynamic_linker='AIX unversionable lib.so' + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + ;; + aix,no) # traditional AIX only + dynamic_linker='AIX lib.a(lib.so.V)' + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='$libname$release.a $libname.a' + soname_spec='$libname$release$shared_ext$major' + ;; + svr4,*) # full svr4 only + dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o)" + library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' + # We do not specify a path in Import Files, so LIBPATH fires. + shlibpath_overrides_runpath=yes + ;; + *,yes) # both, prefer svr4 + dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o), lib.a(lib.so.V)" + library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' + # unpreferred sharedlib libNAME.a needs extra handling + postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"' + postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"' + # We do not specify a path in Import Files, so LIBPATH fires. + shlibpath_overrides_runpath=yes + ;; + *,no) # both, prefer aix + dynamic_linker="AIX lib.a(lib.so.V), lib.so.V($shared_archive_member_spec.o)" + library_names_spec='$libname$release.a $libname.a' + soname_spec='$libname$release$shared_ext$major' + # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling + postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)' + postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"' + ;; + esac + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + case $host_cpu in + powerpc) + # Since July 2007 AmigaOS4 officially supports .so libraries. + # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + ;; + m68k) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. 
+ finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + esac + ;; + +beos*) + library_names_spec='$libname$shared_ext' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[45]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32* | cegcc*) + version_type=windows + shrext_cmds=.dll + need_version=no + need_lib_prefix=no + + case $GCC,$cc_basename in + yes,*) + # gcc + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \$file`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname~ + if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then + eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; + fi' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo $libname | $SED -e 's/^lib/cyg/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' + + sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api" + ;; + mingw* | cegcc*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo $libname | $SED -e 's/^lib/pw/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' + ;; + esac + dynamic_linker='Win32 ld.exe' + ;; + + *,cl* | *,icl*) + # Native MSVC or ICC + libname_spec='$name' + soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' + library_names_spec='$libname.dll.lib' + + case $build_os in + mingw*) + sys_lib_search_path_spec= + lt_save_ifs=$IFS + IFS=';' + for lt_path in $LIB + do + IFS=$lt_save_ifs + # Let DOS variable expansion print the short 8.3 style file name. + lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` + sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" + done + IFS=$lt_save_ifs + # Convert to MSYS style. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` + ;; + cygwin*) + # Convert to unix form, then to dos form, then back to unix form + # but this time dos style (no spaces!) so that the unix form looks + # like /cygdrive/c/PROGRA~1:/cygdr... 
+ sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` + sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` + sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + ;; + *) + sys_lib_search_path_spec=$LIB + if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then + # It is most probably a Windows format PATH. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + # FIXME: find the short name or the path components, as spaces are + # common. (e.g. "Program Files" -> "PROGRA~1") + ;; + esac + + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \$file`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + dynamic_linker='Win32 link.exe' + ;; + + *) + # Assume MSVC and ICC wrapper + library_names_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext $libname.lib' + dynamic_linker='Win32 ld.exe' + ;; + esac + # FIXME: first we should search . and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$major$shared_ext $libname$shared_ext' + soname_spec='$libname$release$major$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' + + sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib" + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd* | dragonfly* | midnightbsd*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. 
+ if test -x /usr/bin/objformat; then + objformat=`/usr/bin/objformat` + else + case $host_os in + freebsd[23].*) objformat=aout ;; + *) objformat=elf ;; + esac + fi + version_type=freebsd-$objformat + case $version_type in + freebsd-elf*) + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' + need_version=yes + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2.*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[01]* | freebsdelf3.[01]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ + freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + *) # from 4.6 on, and DragonFly + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + esac + ;; + +haiku*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + dynamic_linker="$host_os runtime_loader" + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LIBRARY_PATH + shlibpath_overrides_runpath=no + sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' + hardcode_into_libs=yes + ;; + +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. + version_type=sunos + need_lib_prefix=no + need_version=no + case $host_cpu in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + if test 32 = "$HPUX_IA64_MODE"; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + sys_lib_dlsearch_path_spec=/usr/lib/hpux32 + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + sys_lib_dlsearch_path_spec=/usr/lib/hpux64 + fi + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555, ... 
+ postinstall_cmds='chmod 555 $lib' + # or fails outright, so override atomically: + install_override_mode=555 + ;; + +interix[3-9]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test yes = "$lt_cv_prog_gnu_ld"; then + version_type=linux # correct to gnu/linux during the next big refactor + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='$libname$release$shared_ext$major' + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff" + sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +linux*android*) + version_type=none # Android doesn't support versioned libraries. + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext' + soname_spec='$libname$release$shared_ext' + finish_cmds= + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + dynamic_linker='Android linker' + # Don't embed -rpath directories since the linker doesn't support them. + hardcode_libdir_flag_spec='-L$libdir' + ;; + +# This must be glibc/ELF. +linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + + # Some binutils ld are patched to set DT_RUNPATH + if test ${lt_cv_shlibpath_overrides_runpath+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_shlibpath_overrides_runpath=no + save_LDFLAGS=$LDFLAGS + save_libdir=$libdir + eval "libdir=/foo; wl=\"$lt_prog_compiler_wl\"; \ + LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec\"" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main (void) +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO" +then : + if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null +then : + lt_cv_shlibpath_overrides_runpath=yes +fi +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS=$save_LDFLAGS + libdir=$save_libdir + +fi + + shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath + + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + # Ideally, we could use ldconfig to report *all* directores which are + # searched for libraries, however this is still not possible. Aside from not + # being certain /sbin/ldconfig is available, command + # 'ldconfig -N -X -v | grep ^/' on 64bit Fedora does not report /usr/lib64, + # even though it is searched at run-time. Try to do the best guess by + # appending ld.so.conf contents (and includes) to the search path. + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. 
+ dynamic_linker='GNU/Linux ld.so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +*nto* | *qnx*) + version_type=qnx + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='ldqnx.so' + ;; + +openbsd* | bitrig*) + version_type=sunos + sys_lib_dlsearch_path_spec=/usr/lib + need_lib_prefix=no + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then + need_version=no + else + need_version=yes + fi + library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +os2*) + libname_spec='$name' + version_type=windows + shrext_cmds=.dll + need_version=no + need_lib_prefix=no + # OS/2 can only load a DLL with a base name of 8 characters or less. + soname_spec='`test -n "$os2dllname" && libname="$os2dllname"; + v=$($ECHO $release$versuffix | tr -d .-); + n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _); + $ECHO $n$v`$shared_ext' + library_names_spec='${libname}_dll.$libext' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=BEGINLIBPATH + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + postinstall_cmds='base_file=`basename \$file`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname~ + if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then + eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; + fi' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; $ECHO \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='$libname$release$shared_ext$major' + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + +rdos*) + dynamic_linker=no + ;; + +solaris*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test yes = "$with_gnu_ld"; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec; then + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext' + soname_spec='$libname$shared_ext.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + version_type=sco + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + if test yes = "$with_gnu_ld"; then + sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' + else + sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' + case $host_os in + sco3.2v5*) + sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" + ;; + esac + fi + sys_lib_dlsearch_path_spec='/usr/lib' + ;; + +tpf*) + # TPF is a cross-target only. Preferred cross-host = GNU/Linux. 
+ version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +uts4*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 +printf "%s\n" "$dynamic_linker" >&6; } +test no = "$dynamic_linker" && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test yes = "$GCC"; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi + +if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then + sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec +fi + +if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then + sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec +fi + +# remember unaugmented sys_lib_dlsearch_path content for libtool script decls... +configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec + +# ... but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code +func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH" + +# to be used as default LT_SYS_LIBRARY_PATH value in generated libtool +configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 +printf %s "checking how to hardcode library paths into programs... " >&6; } +hardcode_action= +if test -n "$hardcode_libdir_flag_spec" || + test -n "$runpath_var" || + test yes = "$hardcode_automatic"; then + + # We can hardcode non-existent directories. + if test no != "$hardcode_direct" && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, )" && + test no != "$hardcode_minus_L"; then + # Linking always hardcodes the temporary library directory. + hardcode_action=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + hardcode_action=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. 
+ hardcode_action=unsupported +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $hardcode_action" >&5 +printf "%s\n" "$hardcode_action" >&6; } + +if test relink = "$hardcode_action" || + test yes = "$inherit_rpath"; then + # Fast installation is not supported + enable_fast_install=no +elif test yes = "$shlibpath_overrides_runpath" || + test no = "$enable_shared"; then + # Fast installation is not necessary + enable_fast_install=needless +fi + + + + + + + if test yes != "$enable_dlopen"; then + enable_dlopen=unknown + enable_dlopen_self=unknown + enable_dlopen_self_static=unknown +else + lt_cv_dlopen=no + lt_cv_dlopen_libs= + + case $host_os in + beos*) + lt_cv_dlopen=load_add_on + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ;; + + mingw* | pw32* | cegcc*) + lt_cv_dlopen=LoadLibrary + lt_cv_dlopen_libs= + ;; + + cygwin*) + lt_cv_dlopen=dlopen + lt_cv_dlopen_libs= + ;; + + darwin*) + # if libdl is installed we need to link against it + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 +printf %s "checking for dlopen in -ldl... " >&6; } +if test ${ac_cv_lib_dl_dlopen+y} +then : + printf %s "(cached) " >&6 +else $as_nop + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldl $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +char dlopen (); +int +main (void) +{ +return dlopen (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO" +then : + ac_cv_lib_dl_dlopen=yes +else $as_nop + ac_cv_lib_dl_dlopen=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 +printf "%s\n" "$ac_cv_lib_dl_dlopen" >&6; } +if test "x$ac_cv_lib_dl_dlopen" = xyes +then : + lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl +else $as_nop + + lt_cv_dlopen=dyld + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + +fi + + ;; + + tpf*) + # Don't try to run any link tests for TPF. We know it's impossible + # because TPF is a cross-compiler, and we know how we open DSOs. + lt_cv_dlopen=dlopen + lt_cv_dlopen_libs= + lt_cv_dlopen_self=no + ;; + + *) + ac_fn_c_check_func "$LINENO" "shl_load" "ac_cv_func_shl_load" +if test "x$ac_cv_func_shl_load" = xyes +then : + lt_cv_dlopen=shl_load +else $as_nop + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for shl_load in -ldld" >&5 +printf %s "checking for shl_load in -ldld... " >&6; } +if test ${ac_cv_lib_dld_shl_load+y} +then : + printf %s "(cached) " >&6 +else $as_nop + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldld $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +char shl_load (); +int +main (void) +{ +return shl_load (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO" +then : + ac_cv_lib_dld_shl_load=yes +else $as_nop + ac_cv_lib_dld_shl_load=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_shl_load" >&5 +printf "%s\n" "$ac_cv_lib_dld_shl_load" >&6; } +if test "x$ac_cv_lib_dld_shl_load" = xyes +then : + lt_cv_dlopen=shl_load lt_cv_dlopen_libs=-ldld +else $as_nop + ac_fn_c_check_func "$LINENO" "dlopen" "ac_cv_func_dlopen" +if test "x$ac_cv_func_dlopen" = xyes +then : + lt_cv_dlopen=dlopen +else $as_nop + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 +printf %s "checking for dlopen in -ldl... " >&6; } +if test ${ac_cv_lib_dl_dlopen+y} +then : + printf %s "(cached) " >&6 +else $as_nop + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldl $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +char dlopen (); +int +main (void) +{ +return dlopen (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO" +then : + ac_cv_lib_dl_dlopen=yes +else $as_nop + ac_cv_lib_dl_dlopen=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 +printf "%s\n" "$ac_cv_lib_dl_dlopen" >&6; } +if test "x$ac_cv_lib_dl_dlopen" = xyes +then : + lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl +else $as_nop + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for dlopen in -lsvld" >&5 +printf %s "checking for dlopen in -lsvld... " >&6; } +if test ${ac_cv_lib_svld_dlopen+y} +then : + printf %s "(cached) " >&6 +else $as_nop + ac_check_lib_save_LIBS=$LIBS +LIBS="-lsvld $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +char dlopen (); +int +main (void) +{ +return dlopen (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO" +then : + ac_cv_lib_svld_dlopen=yes +else $as_nop + ac_cv_lib_svld_dlopen=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_svld_dlopen" >&5 +printf "%s\n" "$ac_cv_lib_svld_dlopen" >&6; } +if test "x$ac_cv_lib_svld_dlopen" = xyes +then : + lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-lsvld +else $as_nop + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for dld_link in -ldld" >&5 +printf %s "checking for dld_link in -ldld... " >&6; } +if test ${ac_cv_lib_dld_dld_link+y} +then : + printf %s "(cached) " >&6 +else $as_nop + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldld $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +char dld_link (); +int +main (void) +{ +return dld_link (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO" +then : + ac_cv_lib_dld_dld_link=yes +else $as_nop + ac_cv_lib_dld_dld_link=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_dld_link" >&5 +printf "%s\n" "$ac_cv_lib_dld_dld_link" >&6; } +if test "x$ac_cv_lib_dld_dld_link" = xyes +then : + lt_cv_dlopen=dld_link lt_cv_dlopen_libs=-ldld +fi + + +fi + + +fi + + +fi + + +fi + + +fi + + ;; + esac + + if test no = "$lt_cv_dlopen"; then + enable_dlopen=no + else + enable_dlopen=yes + fi + + case $lt_cv_dlopen in + dlopen) + save_CPPFLAGS=$CPPFLAGS + test yes = "$ac_cv_header_dlfcn_h" && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" + + save_LDFLAGS=$LDFLAGS + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" + + save_LIBS=$LIBS + LIBS="$lt_cv_dlopen_libs $LIBS" + + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether a program can dlopen itself" >&5 +printf %s "checking whether a program can dlopen itself... " >&6; } +if test ${lt_cv_dlopen_self+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test yes = "$cross_compiling"; then : + lt_cv_dlopen_self=cross +else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +#line $LINENO "configure" +#include "confdefs.h" + +#if HAVE_DLFCN_H +#include +#endif + +#include + +#ifdef RTLD_GLOBAL +# define LT_DLGLOBAL RTLD_GLOBAL +#else +# ifdef DL_GLOBAL +# define LT_DLGLOBAL DL_GLOBAL +# else +# define LT_DLGLOBAL 0 +# endif +#endif + +/* We may have to define LT_DLLAZY_OR_NOW in the command line if we + find out it does not work in some platform. */ +#ifndef LT_DLLAZY_OR_NOW +# ifdef RTLD_LAZY +# define LT_DLLAZY_OR_NOW RTLD_LAZY +# else +# ifdef DL_LAZY +# define LT_DLLAZY_OR_NOW DL_LAZY +# else +# ifdef RTLD_NOW +# define LT_DLLAZY_OR_NOW RTLD_NOW +# else +# ifdef DL_NOW +# define LT_DLLAZY_OR_NOW DL_NOW +# else +# define LT_DLLAZY_OR_NOW 0 +# endif +# endif +# endif +# endif +#endif + +/* When -fvisibility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. */ +#if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +int fnord () __attribute__((visibility("default"))); +#endif + +int fnord () { return 42; } +int main () +{ + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); + int status = $lt_dlunknown; + + if (self) + { + if (dlsym (self,"fnord")) status = $lt_dlno_uscore; + else + { + if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; + else puts (dlerror ()); + } + /* dlclose (self); */ + } + else + puts (dlerror ()); + + return status; +} +_LT_EOF + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 + (eval $ac_link) 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && test -s "conftest$ac_exeext" 2>/dev/null; then + (./conftest; exit; ) >&5 2>/dev/null + lt_status=$? 
+ case x$lt_status in + x$lt_dlno_uscore) lt_cv_dlopen_self=yes ;; + x$lt_dlneed_uscore) lt_cv_dlopen_self=yes ;; + x$lt_dlunknown|x*) lt_cv_dlopen_self=no ;; + esac + else : + # compilation failed + lt_cv_dlopen_self=no + fi +fi +rm -fr conftest* + + +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self" >&5 +printf "%s\n" "$lt_cv_dlopen_self" >&6; } + + if test yes = "$lt_cv_dlopen_self"; then + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether a statically linked program can dlopen itself" >&5 +printf %s "checking whether a statically linked program can dlopen itself... " >&6; } +if test ${lt_cv_dlopen_self_static+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test yes = "$cross_compiling"; then : + lt_cv_dlopen_self_static=cross +else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +#line $LINENO "configure" +#include "confdefs.h" + +#if HAVE_DLFCN_H +#include +#endif + +#include + +#ifdef RTLD_GLOBAL +# define LT_DLGLOBAL RTLD_GLOBAL +#else +# ifdef DL_GLOBAL +# define LT_DLGLOBAL DL_GLOBAL +# else +# define LT_DLGLOBAL 0 +# endif +#endif + +/* We may have to define LT_DLLAZY_OR_NOW in the command line if we + find out it does not work in some platform. */ +#ifndef LT_DLLAZY_OR_NOW +# ifdef RTLD_LAZY +# define LT_DLLAZY_OR_NOW RTLD_LAZY +# else +# ifdef DL_LAZY +# define LT_DLLAZY_OR_NOW DL_LAZY +# else +# ifdef RTLD_NOW +# define LT_DLLAZY_OR_NOW RTLD_NOW +# else +# ifdef DL_NOW +# define LT_DLLAZY_OR_NOW DL_NOW +# else +# define LT_DLLAZY_OR_NOW 0 +# endif +# endif +# endif +# endif +#endif + +/* When -fvisibility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. */ +#if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +int fnord () __attribute__((visibility("default"))); +#endif + +int fnord () { return 42; } +int main () +{ + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); + int status = $lt_dlunknown; + + if (self) + { + if (dlsym (self,"fnord")) status = $lt_dlno_uscore; + else + { + if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; + else puts (dlerror ()); + } + /* dlclose (self); */ + } + else + puts (dlerror ()); + + return status; +} +_LT_EOF + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 + (eval $ac_link) 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && test -s "conftest$ac_exeext" 2>/dev/null; then + (./conftest; exit; ) >&5 2>/dev/null + lt_status=$? 
+ case x$lt_status in + x$lt_dlno_uscore) lt_cv_dlopen_self_static=yes ;; + x$lt_dlneed_uscore) lt_cv_dlopen_self_static=yes ;; + x$lt_dlunknown|x*) lt_cv_dlopen_self_static=no ;; + esac + else : + # compilation failed + lt_cv_dlopen_self_static=no + fi +fi +rm -fr conftest* + + +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self_static" >&5 +printf "%s\n" "$lt_cv_dlopen_self_static" >&6; } + fi + + CPPFLAGS=$save_CPPFLAGS + LDFLAGS=$save_LDFLAGS + LIBS=$save_LIBS + ;; + esac + + case $lt_cv_dlopen_self in + yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; + *) enable_dlopen_self=unknown ;; + esac + + case $lt_cv_dlopen_self_static in + yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; + *) enable_dlopen_self_static=unknown ;; + esac +fi + + + + + + + + + + + + + + + + + +striplib= +old_striplib= +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether stripping libraries is possible" >&5 +printf %s "checking whether stripping libraries is possible... " >&6; } +if test -z "$STRIP"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +else + if $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then + old_striplib="$STRIP --strip-debug" + striplib="$STRIP --strip-unneeded" + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +printf "%s\n" "yes" >&6; } + else + case $host_os in + darwin*) + # FIXME - insert some real tests, host_os isn't really good enough + striplib="$STRIP -x" + old_striplib="$STRIP -S" + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +printf "%s\n" "yes" >&6; } + ;; + freebsd*) + if $STRIP -V 2>&1 | $GREP "elftoolchain" >/dev/null; then + old_striplib="$STRIP --strip-debug" + striplib="$STRIP --strip-unneeded" + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +printf "%s\n" "yes" >&6; } + else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } + fi + ;; + *) + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } + ;; + esac + fi +fi + + + + + + + + + + + + + # Report what library types will actually be built + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if libtool supports shared libraries" >&5 +printf %s "checking if libtool supports shared libraries... " >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $can_build_shared" >&5 +printf "%s\n" "$can_build_shared" >&6; } + + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether to build shared libraries" >&5 +printf %s "checking whether to build shared libraries... " >&6; } + test no = "$can_build_shared" && enable_shared=no + + # On AIX, shared libraries and static libraries use the same namespace, and + # are all built from PIC. 
+ case $host_os in + aix3*) + test yes = "$enable_shared" && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + + aix[4-9]*) + if test ia64 != "$host_cpu"; then + case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in + yes,aix,yes) ;; # shared object as lib.so file only + yes,svr4,*) ;; # shared object as lib.so archive member only + yes,*) enable_static=no ;; # shared object in lib.a archive as well + esac + fi + ;; + esac + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $enable_shared" >&5 +printf "%s\n" "$enable_shared" >&6; } + + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether to build static libraries" >&5 +printf %s "checking whether to build static libraries... " >&6; } + # Make sure either enable_shared or enable_static is yes. + test yes = "$enable_shared" || enable_static=yes + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $enable_static" >&5 +printf "%s\n" "$enable_static" >&6; } + + + + +fi +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +CC=$lt_save_CC + + + + + + + + + + + + + + + + ac_config_commands="$ac_config_commands libtool" + + + + +# Only expand once: + + + +ac_fn_c_check_func "$LINENO" "getaddrinfo" "ac_cv_func_getaddrinfo" +if test "x$ac_cv_func_getaddrinfo" = xyes +then : + printf "%s\n" "#define HAS_GETADDRINFO 1" >>confdefs.h + +fi + +ac_fn_c_check_func "$LINENO" "getnameinfo" "ac_cv_func_getnameinfo" +if test "x$ac_cv_func_getnameinfo" = xyes +then : + printf "%s\n" "#define HAS_GETNAMEINFO 1" >>confdefs.h + +fi + +ac_fn_c_check_func "$LINENO" "gethostbyaddr_r" "ac_cv_func_gethostbyaddr_r" +if test "x$ac_cv_func_gethostbyaddr_r" = xyes +then : + printf "%s\n" "#define HAS_GETHOSTBYADDR_R 1" >>confdefs.h + +fi + +ac_fn_c_check_func "$LINENO" "gethostbyname_r" "ac_cv_func_gethostbyname_r" +if test "x$ac_cv_func_gethostbyname_r" = xyes +then : + printf "%s\n" "#define HAS_GETHOSTBYNAME_R 1" >>confdefs.h + +fi + +ac_fn_c_check_func "$LINENO" "poll" "ac_cv_func_poll" +if test "x$ac_cv_func_poll" = xyes +then : + printf "%s\n" "#define HAS_POLL 1" >>confdefs.h + +fi + +ac_fn_c_check_func "$LINENO" "fcntl" "ac_cv_func_fcntl" +if test "x$ac_cv_func_fcntl" = xyes +then : + printf "%s\n" "#define HAS_FCNTL 1" >>confdefs.h + +fi + +ac_fn_c_check_func "$LINENO" "inet_pton" "ac_cv_func_inet_pton" +if test "x$ac_cv_func_inet_pton" = xyes +then : + printf "%s\n" "#define HAS_INET_PTON 1" >>confdefs.h + +fi + +ac_fn_c_check_func "$LINENO" "inet_ntop" "ac_cv_func_inet_ntop" +if test "x$ac_cv_func_inet_ntop" = xyes +then : + printf "%s\n" "#define HAS_INET_NTOP 1" >>confdefs.h + +fi + + +ac_fn_c_check_member "$LINENO" "struct msghdr" "msg_flags" "ac_cv_member_struct_msghdr_msg_flags" "#include +" +if test "x$ac_cv_member_struct_msghdr_msg_flags" = xyes +then : + printf "%s\n" "#define HAS_MSGHDR_FLAGS 1" >>confdefs.h + +fi + + +ac_fn_c_check_type "$LINENO" "socklen_t" "ac_cv_type_socklen_t" "#include + #include + +" +if test "x$ac_cv_type_socklen_t" = xyes +then : + printf "%s\n" "#define HAS_SOCKLEN_T 1" >>confdefs.h + +fi + + +ac_config_files="$ac_config_files Makefile libenet.pc" + +cat >confcache <<\_ACEOF +# This file is a shell script that caches the results of configure +# tests run on this system so they can be shared between configure +# scripts and configure runs, see 
configure's option --config-cache. +# It is not useful on other systems. If it contains results you don't +# want to keep, you may remove or edit it. +# +# config.status only pays attention to the cache file if you give it +# the --recheck option to rerun configure. +# +# `ac_cv_env_foo' variables (set or unset) will be overridden when +# loading this file, other *unset* `ac_cv_foo' will be assigned the +# following values. + +_ACEOF + +# The following way of writing the cache mishandles newlines in values, +# but we know of no workaround that is simple, portable, and efficient. +# So, we kill variables containing newlines. +# Ultrix sh set writes to stderr and can't be redirected directly, +# and sets the high bit in the cache file unless we assign to the vars. +( + for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 +printf "%s\n" "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( + *) { eval $ac_var=; unset $ac_var;} ;; + esac ;; + esac + done + + (set) 2>&1 | + case $as_nl`(ac_space=' '; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + # `set' does not quote correctly, so add quotes: double-quote + # substitution turns \\\\ into \\, and sed turns \\ into \. + sed -n \ + "s/'/'\\\\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" + ;; #( + *) + # `set' quotes correctly as required by POSIX, so do not add quotes. + sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) | + sed ' + /^ac_cv_env_/b end + t clear + :clear + s/^\([^=]*\)=\(.*[{}].*\)$/test ${\1+y} || &/ + t end + s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ + :end' >>confcache +if diff "$cache_file" confcache >/dev/null 2>&1; then :; else + if test -w "$cache_file"; then + if test "x$cache_file" != "x/dev/null"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 +printf "%s\n" "$as_me: updating cache $cache_file" >&6;} + if test ! -f "$cache_file" || test -h "$cache_file"; then + cat confcache >"$cache_file" + else + case $cache_file in #( + */* | ?:*) + mv -f confcache "$cache_file"$$ && + mv -f "$cache_file"$$ "$cache_file" ;; #( + *) + mv -f confcache "$cache_file" ;; + esac + fi + fi + else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 +printf "%s\n" "$as_me: not updating unwritable cache $cache_file" >&6;} + fi +fi +rm -f confcache + +test "x$prefix" = xNONE && prefix=$ac_default_prefix +# Let make expand exec_prefix. +test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' + +# Transform confdefs.h into DEFS. +# Protect against shell expansion while executing Makefile rules. +# Protect against Makefile macro expansion. +# +# If the first sed substitution is executed (which looks for macros that +# take arguments), then branch to the quote section. Otherwise, +# look for a macro that doesn't take arguments. 
+ac_script=' +:mline +/\\$/{ + N + s,\\\n,, + b mline +} +t clear +:clear +s/^[ ]*#[ ]*define[ ][ ]*\([^ (][^ (]*([^)]*)\)[ ]*\(.*\)/-D\1=\2/g +t quote +s/^[ ]*#[ ]*define[ ][ ]*\([^ ][^ ]*\)[ ]*\(.*\)/-D\1=\2/g +t quote +b any +:quote +s/[ `~#$^&*(){}\\|;'\''"<>?]/\\&/g +s/\[/\\&/g +s/\]/\\&/g +s/\$/$$/g +H +:any +${ + g + s/^\n// + s/\n/ /g + p +} +' +DEFS=`sed -n "$ac_script" confdefs.h` + + +ac_libobjs= +ac_ltlibobjs= +U= +for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue + # 1. Remove the extension, and $U if already installed. + ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' + ac_i=`printf "%s\n" "$ac_i" | sed "$ac_script"` + # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR + # will be set to the directory where LIBOBJS objects are built. + as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" + as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' +done +LIBOBJS=$ac_libobjs + +LTLIBOBJS=$ac_ltlibobjs + + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking that generated files are newer than configure" >&5 +printf %s "checking that generated files are newer than configure... " >&6; } + if test -n "$am_sleep_pid"; then + # Hide warnings about reused PIDs. + wait $am_sleep_pid 2>/dev/null + fi + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: done" >&5 +printf "%s\n" "done" >&6; } + if test -n "$EXEEXT"; then + am__EXEEXT_TRUE= + am__EXEEXT_FALSE='#' +else + am__EXEEXT_TRUE='#' + am__EXEEXT_FALSE= +fi + +if test -z "${AMDEP_TRUE}" && test -z "${AMDEP_FALSE}"; then + as_fn_error $? "conditional \"AMDEP\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then + as_fn_error $? "conditional \"am__fastdepCC\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi + +: "${CONFIG_STATUS=./config.status}" +ac_write_fail=0 +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files $CONFIG_STATUS" +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 +printf "%s\n" "$as_me: creating $CONFIG_STATUS" >&6;} +as_write_fail=0 +cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 +#! $SHELL +# Generated by $as_me. +# Run this file to recreate the current configuration. +# Compiler output produced by configure, useful for debugging +# configure, is in config.log if it exists. + +debug=false +ac_cs_recheck=false +ac_cs_silent=false + +SHELL=\${CONFIG_SHELL-$SHELL} +export SHELL +_ASEOF +cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 +## -------------------- ## +## M4sh Initialization. ## +## -------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +as_nop=: +if test ${ZSH_VERSION+y} && (emulate sh) >/dev/null 2>&1 +then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else $as_nop + case `(set -o) 2>/dev/null` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi + + + +# Reset variables that may have inherited troublesome values from +# the environment. + +# IFS needs to be set, to space, tab, and newline, in precisely that order. +# (If _AS_PATH_WALK were called with IFS unset, it would have the +# side effect of setting IFS to empty, thus disabling word splitting.) +# Quoting is to prevent editors from complaining about space-tab. 
+as_nl=' +' +export as_nl +IFS=" "" $as_nl" + +PS1='$ ' +PS2='> ' +PS4='+ ' + +# Ensure predictable behavior from utilities with locale-dependent output. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# We cannot yet rely on "unset" to work, but we need these variables +# to be unset--not just set to an empty or harmless value--now, to +# avoid bugs in old shells (e.g. pre-3.0 UWIN ksh). This construct +# also avoids known problems related to "unset" and subshell syntax +# in other old shells (e.g. bash 2.01 and pdksh 5.2.14). +for as_var in BASH_ENV ENV MAIL MAILPATH CDPATH +do eval test \${$as_var+y} \ + && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +done + +# Ensure that fds 0, 1, and 2 are open. +if (exec 3>&0) 2>/dev/null; then :; else exec 0&1) 2>/dev/null; then :; else exec 1>/dev/null; fi +if (exec 3>&2) ; then :; else exec 2>/dev/null; fi + +# The user is always right. +if ${PATH_SEPARATOR+false} :; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + +# Find who we are. Look in the path if we contain no directory separator. +as_myself= +case $0 in #(( + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + test -r "$as_dir$0" && as_myself=$as_dir$0 && break + done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + printf "%s\n" "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + exit 1 +fi + + + +# as_fn_error STATUS ERROR [LINENO LOG_FD] +# ---------------------------------------- +# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are +# provided, also output the error to LOG_FD, referencing LINENO. Then exit the +# script with STATUS, using 1 if that was 0. +as_fn_error () +{ + as_status=$1; test $as_status -eq 0 && as_status=1 + if test "$4"; then + as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + fi + printf "%s\n" "$as_me: error: $2" >&2 + as_fn_exit $as_status +} # as_fn_error + + + +# as_fn_set_status STATUS +# ----------------------- +# Set $? to STATUS, without forking. +as_fn_set_status () +{ + return $1 +} # as_fn_set_status + +# as_fn_exit STATUS +# ----------------- +# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. +as_fn_exit () +{ + set +e + as_fn_set_status $1 + exit $1 +} # as_fn_exit + +# as_fn_unset VAR +# --------------- +# Portably unset VAR. +as_fn_unset () +{ + { eval $1=; unset $1;} +} +as_unset=as_fn_unset + +# as_fn_append VAR VALUE +# ---------------------- +# Append the text in VALUE to the end of the definition contained in VAR. Take +# advantage of any shell optimizations that allow amortized linear growth over +# repeated appends, instead of the typical quadratic growth present in naive +# implementations. +if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null +then : + eval 'as_fn_append () + { + eval $1+=\$2 + }' +else $as_nop + as_fn_append () + { + eval $1=\$$1\$2 + } +fi # as_fn_append + +# as_fn_arith ARG... 
+# ------------------ +# Perform arithmetic evaluation on the ARGs, and store the result in the +# global $as_val. Take advantage of shells that can avoid forks. The arguments +# must be portable across $(()) and expr. +if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null +then : + eval 'as_fn_arith () + { + as_val=$(( $* )) + }' +else $as_nop + as_fn_arith () + { + as_val=`expr "$@" || test $? -eq 1` + } +fi # as_fn_arith + + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +printf "%s\n" X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + + +# Determine whether it's possible to make 'echo' print without a newline. +# These variables are no longer used directly by Autoconf, but are AC_SUBSTed +# for compatibility with existing Makefiles. +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in #((((( +-n*) + case `echo 'xy\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. + xy) ECHO_C='\c';; + *) echo `echo ksh88 bug on AIX 6.1` > /dev/null + ECHO_T=' ';; + esac;; +*) + ECHO_N='-n';; +esac + +# For backward compatibility with old third-party macros, we provide +# the shell variables $as_echo and $as_echo_n. New code should use +# AS_ECHO(["message"]) and AS_ECHO_N(["message"]), respectively. +as_echo='printf %s\n' +as_echo_n='printf %s' + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null +fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -pR'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -pR' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else + as_ln_s='cp -pR' + fi +else + as_ln_s='cp -pR' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + + +# as_fn_mkdir_p +# ------------- +# Create "$as_dir" as a directory, including parents if necessary. +as_fn_mkdir_p () +{ + + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || eval $as_mkdir_p || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`printf "%s\n" "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 
2>/dev/null || +printf "%s\n" X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" + + +} # as_fn_mkdir_p +if mkdir -p . 2>/dev/null; then + as_mkdir_p='mkdir -p "$as_dir"' +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + + +# as_fn_executable_p FILE +# ----------------------- +# Test if FILE is an executable regular file. +as_fn_executable_p () +{ + test -f "$1" && test -x "$1" +} # as_fn_executable_p +as_test_x='test -x' +as_executable_p=as_fn_executable_p + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + +exec 6>&1 +## ----------------------------------- ## +## Main body of $CONFIG_STATUS script. ## +## ----------------------------------- ## +_ASEOF +test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# Save the log message, to keep $0 and so on meaningful, and to +# report actual input values of CONFIG_FILES etc. instead of their +# values after options handling. +ac_log=" +This file was extended by libenet $as_me 1.3.18, which was +generated by GNU Autoconf 2.71. Invocation command line was + + CONFIG_FILES = $CONFIG_FILES + CONFIG_HEADERS = $CONFIG_HEADERS + CONFIG_LINKS = $CONFIG_LINKS + CONFIG_COMMANDS = $CONFIG_COMMANDS + $ $0 $@ + +on `(hostname || uname -n) 2>/dev/null | sed 1q` +" + +_ACEOF + +case $ac_config_files in *" +"*) set x $ac_config_files; shift; ac_config_files=$*;; +esac + + + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +# Files that config.status was made for. +config_files="$ac_config_files" +config_commands="$ac_config_commands" + +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +ac_cs_usage="\ +\`$as_me' instantiates files and other configuration actions +from templates according to the current configuration. Unless the files +and actions are specified as TAGs, all are instantiated by default. + +Usage: $0 [OPTION]... [TAG]... + + -h, --help print this help, then exit + -V, --version print version number and configuration settings, then exit + --config print configuration, then exit + -q, --quiet, --silent + do not print progress messages + -d, --debug don't remove temporary files + --recheck update $as_me by reconfiguring in the same conditions + --file=FILE[:TEMPLATE] + instantiate the configuration file FILE + +Configuration files: +$config_files + +Configuration commands: +$config_commands + +Report bugs to the package provider." + +_ACEOF +ac_cs_config=`printf "%s\n" "$ac_configure_args" | sed "$ac_safe_unquote"` +ac_cs_config_escaped=`printf "%s\n" "$ac_cs_config" | sed "s/^ //; s/'/'\\\\\\\\''/g"` +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +ac_cs_config='$ac_cs_config_escaped' +ac_cs_version="\\ +libenet config.status 1.3.18 +configured by $0, generated by GNU Autoconf 2.71, + with options \\"\$ac_cs_config\\" + +Copyright (C) 2021 Free Software Foundation, Inc. +This config.status script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it." 
+ +ac_pwd='$ac_pwd' +srcdir='$srcdir' +INSTALL='$INSTALL' +MKDIR_P='$MKDIR_P' +AWK='$AWK' +test -n "\$AWK" || AWK=awk +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# The default lists apply if the user does not specify any file. +ac_need_defaults=: +while test $# != 0 +do + case $1 in + --*=?*) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` + ac_shift=: + ;; + --*=) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg= + ac_shift=: + ;; + *) + ac_option=$1 + ac_optarg=$2 + ac_shift=shift + ;; + esac + + case $ac_option in + # Handling of the options. + -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) + ac_cs_recheck=: ;; + --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) + printf "%s\n" "$ac_cs_version"; exit ;; + --config | --confi | --conf | --con | --co | --c ) + printf "%s\n" "$ac_cs_config"; exit ;; + --debug | --debu | --deb | --de | --d | -d ) + debug=: ;; + --file | --fil | --fi | --f ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`printf "%s\n" "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + '') as_fn_error $? "missing file argument" ;; + esac + as_fn_append CONFIG_FILES " '$ac_optarg'" + ac_need_defaults=false;; + --he | --h | --help | --hel | -h ) + printf "%s\n" "$ac_cs_usage"; exit ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil | --si | --s) + ac_cs_silent=: ;; + + # This is an error. + -*) as_fn_error $? "unrecognized option: \`$1' +Try \`$0 --help' for more information." ;; + + *) as_fn_append ac_config_targets " $1" + ac_need_defaults=false ;; + + esac + shift +done + +ac_configure_extra_args= + +if $ac_cs_silent; then + exec 6>/dev/null + ac_configure_extra_args="$ac_configure_extra_args --silent" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +if \$ac_cs_recheck; then + set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion + shift + \printf "%s\n" "running CONFIG_SHELL=$SHELL \$*" >&6 + CONFIG_SHELL='$SHELL' + export CONFIG_SHELL + exec "\$@" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +exec 5>>config.log +{ + echo + sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX +## Running $as_me. ## +_ASBOX + printf "%s\n" "$ac_log" +} >&5 + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +# +# INIT-COMMANDS +# +AMDEP_TRUE="$AMDEP_TRUE" MAKE="${MAKE-make}" + + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. 
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +sed_quote_subst='$sed_quote_subst' +double_quote_subst='$double_quote_subst' +delay_variable_subst='$delay_variable_subst' +macro_version='`$ECHO "$macro_version" | $SED "$delay_single_quote_subst"`' +macro_revision='`$ECHO "$macro_revision" | $SED "$delay_single_quote_subst"`' +enable_shared='`$ECHO "$enable_shared" | $SED "$delay_single_quote_subst"`' +enable_static='`$ECHO "$enable_static" | $SED "$delay_single_quote_subst"`' +pic_mode='`$ECHO "$pic_mode" | $SED "$delay_single_quote_subst"`' +enable_fast_install='`$ECHO "$enable_fast_install" | $SED "$delay_single_quote_subst"`' +shared_archive_member_spec='`$ECHO "$shared_archive_member_spec" | $SED "$delay_single_quote_subst"`' +SHELL='`$ECHO "$SHELL" | $SED "$delay_single_quote_subst"`' +ECHO='`$ECHO "$ECHO" | $SED "$delay_single_quote_subst"`' +PATH_SEPARATOR='`$ECHO "$PATH_SEPARATOR" | $SED "$delay_single_quote_subst"`' +host_alias='`$ECHO "$host_alias" | $SED "$delay_single_quote_subst"`' +host='`$ECHO "$host" | $SED "$delay_single_quote_subst"`' +host_os='`$ECHO "$host_os" | $SED "$delay_single_quote_subst"`' +build_alias='`$ECHO "$build_alias" | $SED "$delay_single_quote_subst"`' +build='`$ECHO "$build" | $SED "$delay_single_quote_subst"`' +build_os='`$ECHO "$build_os" | $SED "$delay_single_quote_subst"`' +SED='`$ECHO "$SED" | $SED "$delay_single_quote_subst"`' +Xsed='`$ECHO "$Xsed" | $SED "$delay_single_quote_subst"`' +GREP='`$ECHO "$GREP" | $SED "$delay_single_quote_subst"`' +EGREP='`$ECHO "$EGREP" | $SED "$delay_single_quote_subst"`' +FGREP='`$ECHO "$FGREP" | $SED "$delay_single_quote_subst"`' +LD='`$ECHO "$LD" | $SED "$delay_single_quote_subst"`' +NM='`$ECHO "$NM" | $SED "$delay_single_quote_subst"`' +LN_S='`$ECHO "$LN_S" | $SED "$delay_single_quote_subst"`' +max_cmd_len='`$ECHO "$max_cmd_len" | $SED "$delay_single_quote_subst"`' +ac_objext='`$ECHO "$ac_objext" | $SED "$delay_single_quote_subst"`' +exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`' +lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`' +lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`' +lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`' +lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`' +lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`' +reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`' +reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`' +FILECMD='`$ECHO "$FILECMD" | $SED "$delay_single_quote_subst"`' +OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`' +deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`' +file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`' +file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`' +want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`' +DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`' +sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`' +AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`' +lt_ar_flags='`$ECHO "$lt_ar_flags" | $SED "$delay_single_quote_subst"`' +AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`' +archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`' +STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`' +RANLIB='`$ECHO "$RANLIB" | $SED 
"$delay_single_quote_subst"`' +old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`' +old_postuninstall_cmds='`$ECHO "$old_postuninstall_cmds" | $SED "$delay_single_quote_subst"`' +old_archive_cmds='`$ECHO "$old_archive_cmds" | $SED "$delay_single_quote_subst"`' +lock_old_archive_extraction='`$ECHO "$lock_old_archive_extraction" | $SED "$delay_single_quote_subst"`' +CC='`$ECHO "$CC" | $SED "$delay_single_quote_subst"`' +CFLAGS='`$ECHO "$CFLAGS" | $SED "$delay_single_quote_subst"`' +compiler='`$ECHO "$compiler" | $SED "$delay_single_quote_subst"`' +GCC='`$ECHO "$GCC" | $SED "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_to_import='`$ECHO "$lt_cv_sys_global_symbol_to_import" | $SED "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`' +lt_cv_nm_interface='`$ECHO "$lt_cv_nm_interface" | $SED "$delay_single_quote_subst"`' +nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`' +lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`' +lt_cv_truncate_bin='`$ECHO "$lt_cv_truncate_bin" | $SED "$delay_single_quote_subst"`' +objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`' +MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`' +lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`' +need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`' +MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`' +DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`' +NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`' +LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`' +OTOOL='`$ECHO "$OTOOL" | $SED "$delay_single_quote_subst"`' +OTOOL64='`$ECHO "$OTOOL64" | $SED "$delay_single_quote_subst"`' +libext='`$ECHO "$libext" | $SED "$delay_single_quote_subst"`' +shrext_cmds='`$ECHO "$shrext_cmds" | $SED "$delay_single_quote_subst"`' +extract_expsyms_cmds='`$ECHO "$extract_expsyms_cmds" | $SED "$delay_single_quote_subst"`' +archive_cmds_need_lc='`$ECHO "$archive_cmds_need_lc" | $SED "$delay_single_quote_subst"`' +enable_shared_with_static_runtimes='`$ECHO "$enable_shared_with_static_runtimes" | $SED "$delay_single_quote_subst"`' +export_dynamic_flag_spec='`$ECHO "$export_dynamic_flag_spec" | $SED "$delay_single_quote_subst"`' +whole_archive_flag_spec='`$ECHO "$whole_archive_flag_spec" | $SED "$delay_single_quote_subst"`' +compiler_needs_object='`$ECHO "$compiler_needs_object" | $SED "$delay_single_quote_subst"`' +old_archive_from_new_cmds='`$ECHO "$old_archive_from_new_cmds" | $SED "$delay_single_quote_subst"`' +old_archive_from_expsyms_cmds='`$ECHO 
"$old_archive_from_expsyms_cmds" | $SED "$delay_single_quote_subst"`' +archive_cmds='`$ECHO "$archive_cmds" | $SED "$delay_single_quote_subst"`' +archive_expsym_cmds='`$ECHO "$archive_expsym_cmds" | $SED "$delay_single_quote_subst"`' +module_cmds='`$ECHO "$module_cmds" | $SED "$delay_single_quote_subst"`' +module_expsym_cmds='`$ECHO "$module_expsym_cmds" | $SED "$delay_single_quote_subst"`' +with_gnu_ld='`$ECHO "$with_gnu_ld" | $SED "$delay_single_quote_subst"`' +allow_undefined_flag='`$ECHO "$allow_undefined_flag" | $SED "$delay_single_quote_subst"`' +no_undefined_flag='`$ECHO "$no_undefined_flag" | $SED "$delay_single_quote_subst"`' +hardcode_libdir_flag_spec='`$ECHO "$hardcode_libdir_flag_spec" | $SED "$delay_single_quote_subst"`' +hardcode_libdir_separator='`$ECHO "$hardcode_libdir_separator" | $SED "$delay_single_quote_subst"`' +hardcode_direct='`$ECHO "$hardcode_direct" | $SED "$delay_single_quote_subst"`' +hardcode_direct_absolute='`$ECHO "$hardcode_direct_absolute" | $SED "$delay_single_quote_subst"`' +hardcode_minus_L='`$ECHO "$hardcode_minus_L" | $SED "$delay_single_quote_subst"`' +hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_quote_subst"`' +hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`' +inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`' +link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`' +always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`' +export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`' +exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`' +include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`' +prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`' +postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`' +file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`' +variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`' +need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`' +need_version='`$ECHO "$need_version" | $SED "$delay_single_quote_subst"`' +version_type='`$ECHO "$version_type" | $SED "$delay_single_quote_subst"`' +runpath_var='`$ECHO "$runpath_var" | $SED "$delay_single_quote_subst"`' +shlibpath_var='`$ECHO "$shlibpath_var" | $SED "$delay_single_quote_subst"`' +shlibpath_overrides_runpath='`$ECHO "$shlibpath_overrides_runpath" | $SED "$delay_single_quote_subst"`' +libname_spec='`$ECHO "$libname_spec" | $SED "$delay_single_quote_subst"`' +library_names_spec='`$ECHO "$library_names_spec" | $SED "$delay_single_quote_subst"`' +soname_spec='`$ECHO "$soname_spec" | $SED "$delay_single_quote_subst"`' +install_override_mode='`$ECHO "$install_override_mode" | $SED "$delay_single_quote_subst"`' +postinstall_cmds='`$ECHO "$postinstall_cmds" | $SED "$delay_single_quote_subst"`' +postuninstall_cmds='`$ECHO "$postuninstall_cmds" | $SED "$delay_single_quote_subst"`' +finish_cmds='`$ECHO "$finish_cmds" | $SED "$delay_single_quote_subst"`' +finish_eval='`$ECHO "$finish_eval" | $SED "$delay_single_quote_subst"`' +hardcode_into_libs='`$ECHO "$hardcode_into_libs" | $SED "$delay_single_quote_subst"`' +sys_lib_search_path_spec='`$ECHO "$sys_lib_search_path_spec" | $SED "$delay_single_quote_subst"`' +configure_time_dlsearch_path='`$ECHO "$configure_time_dlsearch_path" | $SED "$delay_single_quote_subst"`' 
+configure_time_lt_sys_library_path='`$ECHO "$configure_time_lt_sys_library_path" | $SED "$delay_single_quote_subst"`' +hardcode_action='`$ECHO "$hardcode_action" | $SED "$delay_single_quote_subst"`' +enable_dlopen='`$ECHO "$enable_dlopen" | $SED "$delay_single_quote_subst"`' +enable_dlopen_self='`$ECHO "$enable_dlopen_self" | $SED "$delay_single_quote_subst"`' +enable_dlopen_self_static='`$ECHO "$enable_dlopen_self_static" | $SED "$delay_single_quote_subst"`' +old_striplib='`$ECHO "$old_striplib" | $SED "$delay_single_quote_subst"`' +striplib='`$ECHO "$striplib" | $SED "$delay_single_quote_subst"`' + +LTCC='$LTCC' +LTCFLAGS='$LTCFLAGS' +compiler='$compiler_DEFAULT' + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +\$1 +_LTECHO_EOF' +} + +# Quote evaled strings. +for var in SHELL \ +ECHO \ +PATH_SEPARATOR \ +SED \ +GREP \ +EGREP \ +FGREP \ +LD \ +NM \ +LN_S \ +lt_SP2NL \ +lt_NL2SP \ +reload_flag \ +FILECMD \ +OBJDUMP \ +deplibs_check_method \ +file_magic_cmd \ +file_magic_glob \ +want_nocaseglob \ +DLLTOOL \ +sharedlib_from_linklib_cmd \ +AR \ +archiver_list_spec \ +STRIP \ +RANLIB \ +CC \ +CFLAGS \ +compiler \ +lt_cv_sys_global_symbol_pipe \ +lt_cv_sys_global_symbol_to_cdecl \ +lt_cv_sys_global_symbol_to_import \ +lt_cv_sys_global_symbol_to_c_name_address \ +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ +lt_cv_nm_interface \ +nm_file_list_spec \ +lt_cv_truncate_bin \ +lt_prog_compiler_no_builtin_flag \ +lt_prog_compiler_pic \ +lt_prog_compiler_wl \ +lt_prog_compiler_static \ +lt_cv_prog_compiler_c_o \ +need_locks \ +MANIFEST_TOOL \ +DSYMUTIL \ +NMEDIT \ +LIPO \ +OTOOL \ +OTOOL64 \ +shrext_cmds \ +export_dynamic_flag_spec \ +whole_archive_flag_spec \ +compiler_needs_object \ +with_gnu_ld \ +allow_undefined_flag \ +no_undefined_flag \ +hardcode_libdir_flag_spec \ +hardcode_libdir_separator \ +exclude_expsyms \ +include_expsyms \ +file_list_spec \ +variables_saved_for_relink \ +libname_spec \ +library_names_spec \ +soname_spec \ +install_override_mode \ +finish_eval \ +old_striplib \ +striplib; do + case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in + *[\\\\\\\`\\"\\\$]*) + eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes + ;; + *) + eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" + ;; + esac +done + +# Double-quote double-evaled strings. +for var in reload_cmds \ +old_postinstall_cmds \ +old_postuninstall_cmds \ +old_archive_cmds \ +extract_expsyms_cmds \ +old_archive_from_new_cmds \ +old_archive_from_expsyms_cmds \ +archive_cmds \ +archive_expsym_cmds \ +module_cmds \ +module_expsym_cmds \ +export_symbols_cmds \ +prelink_cmds \ +postlink_cmds \ +postinstall_cmds \ +postuninstall_cmds \ +finish_cmds \ +sys_lib_search_path_spec \ +configure_time_dlsearch_path \ +configure_time_lt_sys_library_path; do + case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in + *[\\\\\\\`\\"\\\$]*) + eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes + ;; + *) + eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" + ;; + esac +done + +ac_aux_dir='$ac_aux_dir' + +# See if we are running on zsh, and set the options that allow our +# commands through without removal of \ escapes INIT. 
+if test -n "\${ZSH_VERSION+set}"; then + setopt NO_GLOB_SUBST +fi + + + PACKAGE='$PACKAGE' + VERSION='$VERSION' + RM='$RM' + ofile='$ofile' + + + + +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 + +# Handling of arguments. +for ac_config_target in $ac_config_targets +do + case $ac_config_target in + "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;; + "libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;; + "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; + "libenet.pc") CONFIG_FILES="$CONFIG_FILES libenet.pc" ;; + + *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; + esac +done + + +# If the user did not use the arguments to specify the items to instantiate, +# then the envvar interface is used. Set only those that are not. +# We use the long form for the default assignment because of an extremely +# bizarre bug on SunOS 4.1.3. +if $ac_need_defaults; then + test ${CONFIG_FILES+y} || CONFIG_FILES=$config_files + test ${CONFIG_COMMANDS+y} || CONFIG_COMMANDS=$config_commands +fi + +# Have a temporary directory for convenience. Make it in the build tree +# simply because there is no reason against having it here, and in addition, +# creating and moving files from /tmp can sometimes cause problems. +# Hook for its removal unless debugging. +# Note that there is a small window in which the directory will not be cleaned: +# after its creation but before its name has been assigned to `$tmp'. +$debug || +{ + tmp= ac_tmp= + trap 'exit_status=$? + : "${ac_tmp:=$tmp}" + { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status +' 0 + trap 'as_fn_exit 1' 1 2 13 15 +} +# Create a (secure) tmp directory for tmp files. + +{ + tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && + test -d "$tmp" +} || +{ + tmp=./conf$$-$RANDOM + (umask 077 && mkdir "$tmp") +} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 +ac_tmp=$tmp + +# Set up the scripts for CONFIG_FILES section. +# No need to generate them if there are no CONFIG_FILES. +# This happens for instance with `./config.status config.h'. +if test -n "$CONFIG_FILES"; then + + +ac_cr=`echo X | tr X '\015'` +# On cygwin, bash can eat \r inside `` if the user requested igncr. +# But we know of no other shell where ac_cr would be empty at this +# point, so we can use a bashism as a fallback. +if test "x$ac_cr" = x; then + eval ac_cr=\$\'\\r\' +fi +ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` +if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then + ac_cs_awk_cr='\\r' +else + ac_cs_awk_cr=$ac_cr +fi + +echo 'BEGIN {' >"$ac_tmp/subs1.awk" && +_ACEOF + + +{ + echo "cat >conf$$subs.awk <<_ACEOF" && + echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && + echo "_ACEOF" +} >conf$$subs.sh || + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 +ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` +ac_delim='%!_!# ' +for ac_last_try in false false false false false :; do + . ./conf$$subs.sh || + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 + + ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` + if test $ac_delim_n = $ac_delim_num; then + break + elif $ac_last_try; then + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 + else + ac_delim="$ac_delim!$ac_delim _$ac_delim!! 
" + fi +done +rm -f conf$$subs.sh + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && +_ACEOF +sed -n ' +h +s/^/S["/; s/!.*/"]=/ +p +g +s/^[^!]*!// +:repl +t repl +s/'"$ac_delim"'$// +t delim +:nl +h +s/\(.\{148\}\)..*/\1/ +t more1 +s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ +p +n +b repl +:more1 +s/["\\]/\\&/g; s/^/"/; s/$/"\\/ +p +g +s/.\{148\}// +t nl +:delim +h +s/\(.\{148\}\)..*/\1/ +t more2 +s/["\\]/\\&/g; s/^/"/; s/$/"/ +p +b +:more2 +s/["\\]/\\&/g; s/^/"/; s/$/"\\/ +p +g +s/.\{148\}// +t delim +' >$CONFIG_STATUS || ac_write_fail=1 +rm -f conf$$subs.awk +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +_ACAWK +cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && + for (key in S) S_is_set[key] = 1 + FS = "" + +} +{ + line = $ 0 + nfields = split(line, field, "@") + substed = 0 + len = length(field[1]) + for (i = 2; i < nfields; i++) { + key = field[i] + keylen = length(key) + if (S_is_set[key]) { + value = S[key] + line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) + len += length(value) + length(field[++i]) + substed = 1 + } else + len += 1 + keylen + } + + print line +} + +_ACAWK +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then + sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" +else + cat +fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ + || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 +_ACEOF + +# VPATH may cause trouble with some makes, so we remove sole $(srcdir), +# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and +# trailing colons and then remove the whole line if VPATH becomes empty +# (actually we leave an empty line to preserve line numbers). +if test "x$srcdir" = x.; then + ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ +h +s/// +s/^/:/ +s/[ ]*$/:/ +s/:\$(srcdir):/:/g +s/:\${srcdir}:/:/g +s/:@srcdir@:/:/g +s/^:*// +s/:*$// +x +s/\(=[ ]*\).*/\1/ +G +s/\n// +s/^[^=]*=[ ]*$// +}' +fi + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +fi # test -n "$CONFIG_FILES" + + +eval set X " :F $CONFIG_FILES :C $CONFIG_COMMANDS" +shift +for ac_tag +do + case $ac_tag in + :[FHLC]) ac_mode=$ac_tag; continue;; + esac + case $ac_mode$ac_tag in + :[FHL]*:*);; + :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;; + :[FH]-) ac_tag=-:-;; + :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; + esac + ac_save_IFS=$IFS + IFS=: + set x $ac_tag + IFS=$ac_save_IFS + shift + ac_file=$1 + shift + + case $ac_mode in + :L) ac_source=$1;; + :[FH]) + ac_file_inputs= + for ac_f + do + case $ac_f in + -) ac_f="$ac_tmp/stdin";; + *) # Look for the file first in the build tree, then in the source tree + # (if the path is not absolute). The absolute path cannot be DOS-style, + # because $ac_f cannot contain `:'. + test -f "$ac_f" || + case $ac_f in + [\\/$]*) false;; + *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; + esac || + as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; + esac + case $ac_f in *\'*) ac_f=`printf "%s\n" "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac + as_fn_append ac_file_inputs " '$ac_f'" + done + + # Let's still pretend it is `configure' which instantiates (i.e., don't + # use $as_me), people would be surprised to read: + # /* config.h. Generated by config.status. */ + configure_input='Generated from '` + printf "%s\n" "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' + `' by configure.' + if test x"$ac_file" != x-; then + configure_input="$ac_file. 
$configure_input" + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 +printf "%s\n" "$as_me: creating $ac_file" >&6;} + fi + # Neutralize special characters interpreted by sed in replacement strings. + case $configure_input in #( + *\&* | *\|* | *\\* ) + ac_sed_conf_input=`printf "%s\n" "$configure_input" | + sed 's/[\\\\&|]/\\\\&/g'`;; #( + *) ac_sed_conf_input=$configure_input;; + esac + + case $ac_tag in + *:-:* | *:-) cat >"$ac_tmp/stdin" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; + esac + ;; + esac + + ac_dir=`$as_dirname -- "$ac_file" || +$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$ac_file" : 'X\(//\)[^/]' \| \ + X"$ac_file" : 'X\(//\)$' \| \ + X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || +printf "%s\n" X"$ac_file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + as_dir="$ac_dir"; as_fn_mkdir_p + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`printf "%s\n" "$ac_dir" | sed 's|^\.[\\/]||'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`printf "%s\n" "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. + ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + + case $ac_mode in + :F) + # + # CONFIG_FILE + # + + case $INSTALL in + [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; + *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; + esac + ac_MKDIR_P=$MKDIR_P + case $MKDIR_P in + [\\/$]* | ?:[\\/]* ) ;; + */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;; + esac +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# If the template does not know about datarootdir, expand it. +# FIXME: This hack should be removed a few years after 2.60. +ac_datarootdir_hack=; ac_datarootdir_seen= +ac_sed_dataroot=' +/datarootdir/ { + p + q +} +/@datadir@/p +/@docdir@/p +/@infodir@/p +/@localedir@/p +/@mandir@/p' +case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in +*datarootdir*) ac_datarootdir_seen=yes;; +*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 +printf "%s\n" "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 + ac_datarootdir_hack=' + s&@datadir@&$datadir&g + s&@docdir@&$docdir&g + s&@infodir@&$infodir&g + s&@localedir@&$localedir&g + s&@mandir@&$mandir&g + s&\\\${datarootdir}&$datarootdir&g' ;; +esac +_ACEOF + +# Neutralize VPATH when `$srcdir' = `.'. +# Shell code in configure.ac might set extrasub. +# FIXME: do we really want to maintain this feature? 
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +ac_sed_extra="$ac_vpsub +$extrasub +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +:t +/@[a-zA-Z_][a-zA-Z_0-9]*@/!b +s|@configure_input@|$ac_sed_conf_input|;t t +s&@top_builddir@&$ac_top_builddir_sub&;t t +s&@top_build_prefix@&$ac_top_build_prefix&;t t +s&@srcdir@&$ac_srcdir&;t t +s&@abs_srcdir@&$ac_abs_srcdir&;t t +s&@top_srcdir@&$ac_top_srcdir&;t t +s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t +s&@builddir@&$ac_builddir&;t t +s&@abs_builddir@&$ac_abs_builddir&;t t +s&@abs_top_builddir@&$ac_abs_top_builddir&;t t +s&@INSTALL@&$ac_INSTALL&;t t +s&@MKDIR_P@&$ac_MKDIR_P&;t t +$ac_datarootdir_hack +" +eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ + >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + +test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && + { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && + { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ + "$ac_tmp/out"`; test -z "$ac_out"; } && + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&5 +printf "%s\n" "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&2;} + + rm -f "$ac_tmp/stdin" + case $ac_file in + -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; + *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; + esac \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + ;; + + + :C) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 +printf "%s\n" "$as_me: executing $ac_file commands" >&6;} + ;; + esac + + + case $ac_file$ac_mode in + "depfiles":C) test x"$AMDEP_TRUE" != x"" || { + # Older Autoconf quotes --file arguments for eval, but not when files + # are listed without --file. Let's play safe and only enable the eval + # if we detect the quoting. + # TODO: see whether this extra hack can be removed once we start + # requiring Autoconf 2.70 or later. + case $CONFIG_FILES in #( + *\'*) : + eval set x "$CONFIG_FILES" ;; #( + *) : + set x $CONFIG_FILES ;; #( + *) : + ;; +esac + shift + # Used to flag and report bootstrapping failures. + am_rc=0 + for am_mf + do + # Strip MF so we end up with the name of the file. + am_mf=`printf "%s\n" "$am_mf" | sed -e 's/:.*$//'` + # Check whether this is an Automake generated Makefile which includes + # dependency-tracking related rules and includes. + # Grep'ing the whole file directly is not great: AIX grep has a line + # limit of 2048, but all sed's we know have understand at least 4000. + sed -n 's,^am--depfiles:.*,X,p' "$am_mf" | grep X >/dev/null 2>&1 \ + || continue + am_dirpart=`$as_dirname -- "$am_mf" || +$as_expr X"$am_mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$am_mf" : 'X\(//\)[^/]' \| \ + X"$am_mf" : 'X\(//\)$' \| \ + X"$am_mf" : 'X\(/\)' \| . 2>/dev/null || +printf "%s\n" X"$am_mf" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + am_filepart=`$as_basename -- "$am_mf" || +$as_expr X/"$am_mf" : '.*/\([^/][^/]*\)/*$' \| \ + X"$am_mf" : 'X\(//\)$' \| \ + X"$am_mf" : 'X\(/\)' \| . 
2>/dev/null || +printf "%s\n" X/"$am_mf" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + { echo "$as_me:$LINENO: cd "$am_dirpart" \ + && sed -e '/# am--include-marker/d' "$am_filepart" \ + | $MAKE -f - am--depfiles" >&5 + (cd "$am_dirpart" \ + && sed -e '/# am--include-marker/d' "$am_filepart" \ + | $MAKE -f - am--depfiles) >&5 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } || am_rc=$? + done + if test $am_rc -ne 0; then + { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "Something went wrong bootstrapping makefile fragments + for automatic dependency tracking. If GNU make was not used, consider + re-running the configure script with MAKE=\"gmake\" (or whatever is + necessary). You can also try re-running configure with the + '--disable-dependency-tracking' option to at least be able to build + the package (albeit without support for automatic dependency tracking). +See \`config.log' for more details" "$LINENO" 5; } + fi + { am_dirpart=; unset am_dirpart;} + { am_filepart=; unset am_filepart;} + { am_mf=; unset am_mf;} + { am_rc=; unset am_rc;} + rm -f conftest-deps.mk +} + ;; + "libtool":C) + + # See if we are running on zsh, and set the options that allow our + # commands through without removal of \ escapes. + if test -n "${ZSH_VERSION+set}"; then + setopt NO_GLOB_SUBST + fi + + cfgfile=${ofile}T + trap "$RM \"$cfgfile\"; exit 1" 1 2 15 + $RM "$cfgfile" + + cat <<_LT_EOF >> "$cfgfile" +#! $SHELL +# Generated automatically by $as_me ($PACKAGE) $VERSION +# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: +# NOTE: Changes made to this file will be lost: look at ltmain.sh. + +# Provide generalized library-building support services. +# Written by Gordon Matzigkeit, 1996 + +# Copyright (C) 2014 Free Software Foundation, Inc. +# This is free software; see the source for copying conditions. There is NO +# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +# GNU Libtool is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of of the License, or +# (at your option) any later version. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program or library that is built +# using GNU Libtool, you may include this file under the same +# distribution terms that you use for the rest of that program. +# +# GNU Libtool is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +# The names of the tagged configurations supported by this script. +available_tags='' + +# Configured defaults for sys_lib_dlsearch_path munging. +: \${LT_SYS_LIBRARY_PATH="$configure_time_lt_sys_library_path"} + +# ### BEGIN LIBTOOL CONFIG + +# Which release of libtool.m4 was used? +macro_version=$macro_version +macro_revision=$macro_revision + +# Whether or not to build shared libraries. +build_libtool_libs=$enable_shared + +# Whether or not to build static libraries. 
+build_old_libs=$enable_static + +# What type of objects to build. +pic_mode=$pic_mode + +# Whether or not to optimize for fast installation. +fast_install=$enable_fast_install + +# Shared archive member basename,for filename based shared library versioning on AIX. +shared_archive_member_spec=$shared_archive_member_spec + +# Shell to use when invoking shell scripts. +SHELL=$lt_SHELL + +# An echo program that protects backslashes. +ECHO=$lt_ECHO + +# The PATH separator for the build system. +PATH_SEPARATOR=$lt_PATH_SEPARATOR + +# The host system. +host_alias=$host_alias +host=$host +host_os=$host_os + +# The build system. +build_alias=$build_alias +build=$build +build_os=$build_os + +# A sed program that does not truncate output. +SED=$lt_SED + +# Sed that helps us avoid accidentally triggering echo(1) options like -n. +Xsed="\$SED -e 1s/^X//" + +# A grep program that handles long lines. +GREP=$lt_GREP + +# An ERE matcher. +EGREP=$lt_EGREP + +# A literal string matcher. +FGREP=$lt_FGREP + +# A BSD- or MS-compatible name lister. +NM=$lt_NM + +# Whether we need soft or hard links. +LN_S=$lt_LN_S + +# What is the maximum length of a command? +max_cmd_len=$max_cmd_len + +# Object file suffix (normally "o"). +objext=$ac_objext + +# Executable file suffix (normally ""). +exeext=$exeext + +# whether the shell understands "unset". +lt_unset=$lt_unset + +# turn spaces into newlines. +SP2NL=$lt_lt_SP2NL + +# turn newlines into spaces. +NL2SP=$lt_lt_NL2SP + +# convert \$build file names to \$host format. +to_host_file_cmd=$lt_cv_to_host_file_cmd + +# convert \$build files to toolchain format. +to_tool_file_cmd=$lt_cv_to_tool_file_cmd + +# A file(cmd) program that detects file types. +FILECMD=$lt_FILECMD + +# An object symbol dumper. +OBJDUMP=$lt_OBJDUMP + +# Method to check whether dependent libraries are shared objects. +deplibs_check_method=$lt_deplibs_check_method + +# Command to use when deplibs_check_method = "file_magic". +file_magic_cmd=$lt_file_magic_cmd + +# How to find potential files when deplibs_check_method = "file_magic". +file_magic_glob=$lt_file_magic_glob + +# Find potential files using nocaseglob when deplibs_check_method = "file_magic". +want_nocaseglob=$lt_want_nocaseglob + +# DLL creation program. +DLLTOOL=$lt_DLLTOOL + +# Command to associate shared and link libraries. +sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd + +# The archiver. +AR=$lt_AR + +# Flags to create an archive (by configure). +lt_ar_flags=$lt_ar_flags + +# Flags to create an archive. +AR_FLAGS=\${ARFLAGS-"\$lt_ar_flags"} + +# How to feed a file listing to the archiver. +archiver_list_spec=$lt_archiver_list_spec + +# A symbol stripping program. +STRIP=$lt_STRIP + +# Commands used to install an old-style archive. +RANLIB=$lt_RANLIB +old_postinstall_cmds=$lt_old_postinstall_cmds +old_postuninstall_cmds=$lt_old_postuninstall_cmds + +# Whether to use a lock for old archive extraction. +lock_old_archive_extraction=$lock_old_archive_extraction + +# A C compiler. +LTCC=$lt_CC + +# LTCC compiler flags. +LTCFLAGS=$lt_CFLAGS + +# Take the output of nm and produce a listing of raw symbols and C names. +global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe + +# Transform the output of nm in a proper C declaration. +global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl + +# Transform the output of nm into a list of symbols to manually relocate. +global_symbol_to_import=$lt_lt_cv_sys_global_symbol_to_import + +# Transform the output of nm in a C name address pair. 
+global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + +# Transform the output of nm in a C name address pair when lib prefix is needed. +global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix + +# The name lister interface. +nm_interface=$lt_lt_cv_nm_interface + +# Specify filename containing input files for \$NM. +nm_file_list_spec=$lt_nm_file_list_spec + +# The root where to search for dependent libraries,and where our libraries should be installed. +lt_sysroot=$lt_sysroot + +# Command to truncate a binary pipe. +lt_truncate_bin=$lt_lt_cv_truncate_bin + +# The name of the directory that contains temporary libtool files. +objdir=$objdir + +# Used to examine libraries when file_magic_cmd begins with "file". +MAGIC_CMD=$MAGIC_CMD + +# Must we lock files when doing compilation? +need_locks=$lt_need_locks + +# Manifest tool. +MANIFEST_TOOL=$lt_MANIFEST_TOOL + +# Tool to manipulate archived DWARF debug symbol files on Mac OS X. +DSYMUTIL=$lt_DSYMUTIL + +# Tool to change global to local symbols on Mac OS X. +NMEDIT=$lt_NMEDIT + +# Tool to manipulate fat objects and archives on Mac OS X. +LIPO=$lt_LIPO + +# ldd/readelf like tool for Mach-O binaries on Mac OS X. +OTOOL=$lt_OTOOL + +# ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4. +OTOOL64=$lt_OTOOL64 + +# Old archive suffix (normally "a"). +libext=$libext + +# Shared library suffix (normally ".so"). +shrext_cmds=$lt_shrext_cmds + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds=$lt_extract_expsyms_cmds + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at link time. +variables_saved_for_relink=$lt_variables_saved_for_relink + +# Do we need the "lib" prefix for modules? +need_lib_prefix=$need_lib_prefix + +# Do we need a version for libraries? +need_version=$need_version + +# Library versioning type. +version_type=$version_type + +# Shared library runtime path variable. +runpath_var=$runpath_var + +# Shared library path variable. +shlibpath_var=$shlibpath_var + +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=$shlibpath_overrides_runpath + +# Format of library name prefix. +libname_spec=$lt_libname_spec + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME +library_names_spec=$lt_library_names_spec + +# The coded name of the library, if different from the real name. +soname_spec=$lt_soname_spec + +# Permission mode override for installation of shared libraries. +install_override_mode=$lt_install_override_mode + +# Command to use after installation of a shared archive. +postinstall_cmds=$lt_postinstall_cmds + +# Command to use after uninstallation of a shared archive. +postuninstall_cmds=$lt_postuninstall_cmds + +# Commands used to finish a libtool library installation in a directory. +finish_cmds=$lt_finish_cmds + +# As "finish_cmds", except a single script fragment to be evaled but +# not shown. +finish_eval=$lt_finish_eval + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=$hardcode_into_libs + +# Compile-time system search path for libraries. +sys_lib_search_path_spec=$lt_sys_lib_search_path_spec + +# Detected run-time system search path for libraries. +sys_lib_dlsearch_path_spec=$lt_configure_time_dlsearch_path + +# Explicit LT_SYS_LIBRARY_PATH set during ./configure time. 
+configure_time_lt_sys_library_path=$lt_configure_time_lt_sys_library_path + +# Whether dlopen is supported. +dlopen_support=$enable_dlopen + +# Whether dlopen of programs is supported. +dlopen_self=$enable_dlopen_self + +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=$enable_dlopen_self_static + +# Commands to strip libraries. +old_striplib=$lt_old_striplib +striplib=$lt_striplib + + +# The linker used to build libraries. +LD=$lt_LD + +# How to create reloadable object files. +reload_flag=$lt_reload_flag +reload_cmds=$lt_reload_cmds + +# Commands used to build an old-style archive. +old_archive_cmds=$lt_old_archive_cmds + +# A language specific compiler. +CC=$lt_compiler + +# Is the compiler the GNU compiler? +with_gcc=$GCC + +# Compiler flag to turn off builtin functions. +no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_lt_prog_compiler_static + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_lt_cv_prog_compiler_c_o + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc + +# Whether or not to disallow shared libs when runtime libs are static. +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec + +# Whether the compiler copes with passing no objects directly. +compiler_needs_object=$lt_compiler_needs_object + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds + +# Commands used to build a shared archive. +archive_cmds=$lt_archive_cmds +archive_expsym_cmds=$lt_archive_expsym_cmds + +# Commands used to build a loadable module if different from building +# a shared archive. +module_cmds=$lt_module_cmds +module_expsym_cmds=$lt_module_expsym_cmds + +# Whether we are building with GNU ld or not. +with_gnu_ld=$lt_with_gnu_ld + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag + +# Flag that enforces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec + +# Whether we need a single "-rpath" flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator + +# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes +# DIR into the resulting binary. +hardcode_direct=$hardcode_direct + +# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes +# DIR into the resulting binary and the resulting library dependency is +# "absolute",i.e impossible to change by setting \$shlibpath_var if the +# library is relocated. +hardcode_direct_absolute=$hardcode_direct_absolute + +# Set to "yes" if using the -LDIR flag during linking hardcodes DIR +# into the resulting binary. 
+hardcode_minus_L=$hardcode_minus_L + +# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR +# into the resulting binary. +hardcode_shlibpath_var=$hardcode_shlibpath_var + +# Set to "yes" if building a shared library automatically hardcodes DIR +# into the library and all subsequent libraries and executables linked +# against it. +hardcode_automatic=$hardcode_automatic + +# Set to yes if linker adds runtime paths of dependent libraries +# to runtime path list. +inherit_rpath=$inherit_rpath + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs + +# Set to "yes" if exported symbols are required. +always_export_symbols=$always_export_symbols + +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_exclude_expsyms + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms + +# Commands necessary for linking programs (against libraries) with templates. +prelink_cmds=$lt_prelink_cmds + +# Commands necessary for finishing linking programs. +postlink_cmds=$lt_postlink_cmds + +# Specify filename containing input files. +file_list_spec=$lt_file_list_spec + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action + +# ### END LIBTOOL CONFIG + +_LT_EOF + + cat <<'_LT_EOF' >> "$cfgfile" + +# ### BEGIN FUNCTIONS SHARED WITH CONFIGURE + +# func_munge_path_list VARIABLE PATH +# ----------------------------------- +# VARIABLE is name of variable containing _space_ separated list of +# directories to be munged by the contents of PATH, which is string +# having a format: +# "DIR[:DIR]:" +# string "DIR[ DIR]" will be prepended to VARIABLE +# ":DIR[:DIR]" +# string "DIR[ DIR]" will be appended to VARIABLE +# "DIRP[:DIRP]::[DIRA:]DIRA" +# string "DIRP[ DIRP]" will be prepended to VARIABLE and string +# "DIRA[ DIRA]" will be appended to VARIABLE +# "DIR[:DIR]" +# VARIABLE will be replaced by "DIR[ DIR]" +func_munge_path_list () +{ + case x$2 in + x) + ;; + *:) + eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\" + ;; + x:*) + eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\" + ;; + *::*) + eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\" + eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\" + ;; + *) + eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\" + ;; + esac +} + + +# Calculate cc_basename. Skip known compiler wrappers and cross-prefix. +func_cc_basename () +{ + for cc_temp in $*""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac + done + func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` +} + + +# ### END FUNCTIONS SHARED WITH CONFIGURE + +_LT_EOF + + case $host_os in + aix3*) + cat <<\_LT_EOF >> "$cfgfile" +# AIX sometimes has problems with the GCC collect2 program. For some +# reason, if we set the COLLECT_NAMES environment variable, the problems +# vanish in a puff of smoke. +if test set != "${COLLECT_NAMES+set}"; then + COLLECT_NAMES= + export COLLECT_NAMES +fi +_LT_EOF + ;; + esac + + + +ltmain=$ac_aux_dir/ltmain.sh + + + # We use sed instead of cat because bash on DJGPP gets confused if + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? 
+ $SED '$q' "$ltmain" >> "$cfgfile" \ + || (rm -f "$cfgfile"; exit 1) + + mv -f "$cfgfile" "$ofile" || + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" + + ;; + + esac +done # for ac_tag + + +as_fn_exit 0 +_ACEOF +ac_clean_files=$ac_clean_files_save + +test $ac_write_fail = 0 || + as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 + + +# configure is writing to config.log, and then calls config.status. +# config.status does its own redirection, appending to config.log. +# Unfortunately, on DOS this fails, as config.log is still kept open +# by configure, so config.status won't be able to write to it; its +# output is simply discarded. So we exec the FD to /dev/null, +# effectively closing config.log, so it can be properly (re)opened and +# appended to by config.status. When coming back to configure, we +# need to make the FD available again. +if test "$no_create" != yes; then + ac_cs_success=: + ac_config_status_args= + test "$silent" = yes && + ac_config_status_args="$ac_config_status_args --quiet" + exec 5>/dev/null + $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false + exec 5>>config.log + # Use ||, not &&, to avoid exiting from the if with $? = 1, which + # would make configure fail if this is the last instruction. + $ac_cs_success || as_fn_exit 1 +fi +if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 +printf "%s\n" "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} +fi + + diff --git a/pkg/enet/enet/configure.ac b/pkg/enet/enet/configure.ac new file mode 100644 index 000000000..552e7df33 --- /dev/null +++ b/pkg/enet/enet/configure.ac @@ -0,0 +1,28 @@ +AC_INIT([libenet], [1.3.18]) +AC_CONFIG_SRCDIR([include/enet/enet.h]) +AM_INIT_AUTOMAKE([foreign]) + +AC_CONFIG_MACRO_DIR([m4]) + +AC_PROG_CC +AC_PROG_LIBTOOL + +AC_CHECK_FUNC(getaddrinfo, [AC_DEFINE(HAS_GETADDRINFO)]) +AC_CHECK_FUNC(getnameinfo, [AC_DEFINE(HAS_GETNAMEINFO)]) +AC_CHECK_FUNC(gethostbyaddr_r, [AC_DEFINE(HAS_GETHOSTBYADDR_R)]) +AC_CHECK_FUNC(gethostbyname_r, [AC_DEFINE(HAS_GETHOSTBYNAME_R)]) +AC_CHECK_FUNC(poll, [AC_DEFINE(HAS_POLL)]) +AC_CHECK_FUNC(fcntl, [AC_DEFINE(HAS_FCNTL)]) +AC_CHECK_FUNC(inet_pton, [AC_DEFINE(HAS_INET_PTON)]) +AC_CHECK_FUNC(inet_ntop, [AC_DEFINE(HAS_INET_NTOP)]) + +AC_CHECK_MEMBER(struct msghdr.msg_flags, [AC_DEFINE(HAS_MSGHDR_FLAGS)], , [#include ]) + +AC_CHECK_TYPE(socklen_t, [AC_DEFINE(HAS_SOCKLEN_T)], , + #include + #include +) + +AC_CONFIG_FILES([Makefile + libenet.pc]) +AC_OUTPUT diff --git a/pkg/enet/enet/depcomp b/pkg/enet/enet/depcomp new file mode 100755 index 000000000..715e34311 --- /dev/null +++ b/pkg/enet/enet/depcomp @@ -0,0 +1,791 @@ +#! /bin/sh +# depcomp - compile a program generating dependencies as side-effects + +scriptversion=2018-03-07.03; # UTC + +# Copyright (C) 1999-2021 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+ +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# Originally written by Alexandre Oliva . + +case $1 in + '') + echo "$0: No command. Try '$0 --help' for more information." 1>&2 + exit 1; + ;; + -h | --h*) + cat <<\EOF +Usage: depcomp [--help] [--version] PROGRAM [ARGS] + +Run PROGRAMS ARGS to compile a file, generating dependencies +as side-effects. + +Environment variables: + depmode Dependency tracking mode. + source Source file read by 'PROGRAMS ARGS'. + object Object file output by 'PROGRAMS ARGS'. + DEPDIR directory where to store dependencies. + depfile Dependency file to output. + tmpdepfile Temporary file to use when outputting dependencies. + libtool Whether libtool is used (yes/no). + +Report bugs to . +EOF + exit $? + ;; + -v | --v*) + echo "depcomp $scriptversion" + exit $? + ;; +esac + +# Get the directory component of the given path, and save it in the +# global variables '$dir'. Note that this directory component will +# be either empty or ending with a '/' character. This is deliberate. +set_dir_from () +{ + case $1 in + */*) dir=`echo "$1" | sed -e 's|/[^/]*$|/|'`;; + *) dir=;; + esac +} + +# Get the suffix-stripped basename of the given path, and save it the +# global variable '$base'. +set_base_from () +{ + base=`echo "$1" | sed -e 's|^.*/||' -e 's/\.[^.]*$//'` +} + +# If no dependency file was actually created by the compiler invocation, +# we still have to create a dummy depfile, to avoid errors with the +# Makefile "include basename.Plo" scheme. +make_dummy_depfile () +{ + echo "#dummy" > "$depfile" +} + +# Factor out some common post-processing of the generated depfile. +# Requires the auxiliary global variable '$tmpdepfile' to be set. +aix_post_process_depfile () +{ + # If the compiler actually managed to produce a dependency file, + # post-process it. + if test -f "$tmpdepfile"; then + # Each line is of the form 'foo.o: dependency.h'. + # Do two passes, one to just change these to + # $object: dependency.h + # and one to simply output + # dependency.h: + # which is needed to avoid the deleted-header problem. + { sed -e "s,^.*\.[$lower]*:,$object:," < "$tmpdepfile" + sed -e "s,^.*\.[$lower]*:[$tab ]*,," -e 's,$,:,' < "$tmpdepfile" + } > "$depfile" + rm -f "$tmpdepfile" + else + make_dummy_depfile + fi +} + +# A tabulation character. +tab=' ' +# A newline character. +nl=' +' +# Character ranges might be problematic outside the C locale. +# These definitions help. +upper=ABCDEFGHIJKLMNOPQRSTUVWXYZ +lower=abcdefghijklmnopqrstuvwxyz +digits=0123456789 +alpha=${upper}${lower} + +if test -z "$depmode" || test -z "$source" || test -z "$object"; then + echo "depcomp: Variables source, object and depmode must be set" 1>&2 + exit 1 +fi + +# Dependencies for sub/bar.o or sub/bar.obj go into sub/.deps/bar.Po. +depfile=${depfile-`echo "$object" | + sed 's|[^\\/]*$|'${DEPDIR-.deps}'/&|;s|\.\([^.]*\)$|.P\1|;s|Pobj$|Po|'`} +tmpdepfile=${tmpdepfile-`echo "$depfile" | sed 's/\.\([^.]*\)$/.T\1/'`} + +rm -f "$tmpdepfile" + +# Avoid interferences from the environment. +gccflag= dashmflag= + +# Some modes work just like other modes, but use different flags. 
We +# parameterize here, but still list the modes in the big case below, +# to make depend.m4 easier to write. Note that we *cannot* use a case +# here, because this file can only contain one case statement. +if test "$depmode" = hp; then + # HP compiler uses -M and no extra arg. + gccflag=-M + depmode=gcc +fi + +if test "$depmode" = dashXmstdout; then + # This is just like dashmstdout with a different argument. + dashmflag=-xM + depmode=dashmstdout +fi + +cygpath_u="cygpath -u -f -" +if test "$depmode" = msvcmsys; then + # This is just like msvisualcpp but w/o cygpath translation. + # Just convert the backslash-escaped backslashes to single forward + # slashes to satisfy depend.m4 + cygpath_u='sed s,\\\\,/,g' + depmode=msvisualcpp +fi + +if test "$depmode" = msvc7msys; then + # This is just like msvc7 but w/o cygpath translation. + # Just convert the backslash-escaped backslashes to single forward + # slashes to satisfy depend.m4 + cygpath_u='sed s,\\\\,/,g' + depmode=msvc7 +fi + +if test "$depmode" = xlc; then + # IBM C/C++ Compilers xlc/xlC can output gcc-like dependency information. + gccflag=-qmakedep=gcc,-MF + depmode=gcc +fi + +case "$depmode" in +gcc3) +## gcc 3 implements dependency tracking that does exactly what +## we want. Yay! Note: for some reason libtool 1.4 doesn't like +## it if -MD -MP comes after the -MF stuff. Hmm. +## Unfortunately, FreeBSD c89 acceptance of flags depends upon +## the command line argument order; so add the flags where they +## appear in depend2.am. Note that the slowdown incurred here +## affects only configure: in makefiles, %FASTDEP% shortcuts this. + for arg + do + case $arg in + -c) set fnord "$@" -MT "$object" -MD -MP -MF "$tmpdepfile" "$arg" ;; + *) set fnord "$@" "$arg" ;; + esac + shift # fnord + shift # $arg + done + "$@" + stat=$? + if test $stat -ne 0; then + rm -f "$tmpdepfile" + exit $stat + fi + mv "$tmpdepfile" "$depfile" + ;; + +gcc) +## Note that this doesn't just cater to obsosete pre-3.x GCC compilers. +## but also to in-use compilers like IMB xlc/xlC and the HP C compiler. +## (see the conditional assignment to $gccflag above). +## There are various ways to get dependency output from gcc. Here's +## why we pick this rather obscure method: +## - Don't want to use -MD because we'd like the dependencies to end +## up in a subdir. Having to rename by hand is ugly. +## (We might end up doing this anyway to support other compilers.) +## - The DEPENDENCIES_OUTPUT environment variable makes gcc act like +## -MM, not -M (despite what the docs say). Also, it might not be +## supported by the other compilers which use the 'gcc' depmode. +## - Using -M directly means running the compiler twice (even worse +## than renaming). + if test -z "$gccflag"; then + gccflag=-MD, + fi + "$@" -Wp,"$gccflag$tmpdepfile" + stat=$? + if test $stat -ne 0; then + rm -f "$tmpdepfile" + exit $stat + fi + rm -f "$depfile" + echo "$object : \\" > "$depfile" + # The second -e expression handles DOS-style file names with drive + # letters. + sed -e 's/^[^:]*: / /' \ + -e 's/^['$alpha']:\/[^:]*: / /' < "$tmpdepfile" >> "$depfile" +## This next piece of magic avoids the "deleted header file" problem. +## The problem is that when a header file which appears in a .P file +## is deleted, the dependency causes make to die (because there is +## typically no way to rebuild the header). We avoid this by adding +## dummy dependencies for each header file. Too bad gcc doesn't do +## this for us directly. +## Some versions of gcc put a space before the ':'. 
On the theory +## that the space means something, we add a space to the output as +## well. hp depmode also adds that space, but also prefixes the VPATH +## to the object. Take care to not repeat it in the output. +## Some versions of the HPUX 10.20 sed can't process this invocation +## correctly. Breaking it into two sed invocations is a workaround. + tr ' ' "$nl" < "$tmpdepfile" \ + | sed -e 's/^\\$//' -e '/^$/d' -e "s|.*$object$||" -e '/:$/d' \ + | sed -e 's/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +hp) + # This case exists only to let depend.m4 do its work. It works by + # looking at the text of this script. This case will never be run, + # since it is checked for above. + exit 1 + ;; + +sgi) + if test "$libtool" = yes; then + "$@" "-Wp,-MDupdate,$tmpdepfile" + else + "$@" -MDupdate "$tmpdepfile" + fi + stat=$? + if test $stat -ne 0; then + rm -f "$tmpdepfile" + exit $stat + fi + rm -f "$depfile" + + if test -f "$tmpdepfile"; then # yes, the sourcefile depend on other files + echo "$object : \\" > "$depfile" + # Clip off the initial element (the dependent). Don't try to be + # clever and replace this with sed code, as IRIX sed won't handle + # lines with more than a fixed number of characters (4096 in + # IRIX 6.2 sed, 8192 in IRIX 6.5). We also remove comment lines; + # the IRIX cc adds comments like '#:fec' to the end of the + # dependency line. + tr ' ' "$nl" < "$tmpdepfile" \ + | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' \ + | tr "$nl" ' ' >> "$depfile" + echo >> "$depfile" + # The second pass generates a dummy entry for each header file. + tr ' ' "$nl" < "$tmpdepfile" \ + | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' -e 's/$/:/' \ + >> "$depfile" + else + make_dummy_depfile + fi + rm -f "$tmpdepfile" + ;; + +xlc) + # This case exists only to let depend.m4 do its work. It works by + # looking at the text of this script. This case will never be run, + # since it is checked for above. + exit 1 + ;; + +aix) + # The C for AIX Compiler uses -M and outputs the dependencies + # in a .u file. In older versions, this file always lives in the + # current directory. Also, the AIX compiler puts '$object:' at the + # start of each line; $object doesn't have directory information. + # Version 6 uses the directory in both cases. + set_dir_from "$object" + set_base_from "$object" + if test "$libtool" = yes; then + tmpdepfile1=$dir$base.u + tmpdepfile2=$base.u + tmpdepfile3=$dir.libs/$base.u + "$@" -Wc,-M + else + tmpdepfile1=$dir$base.u + tmpdepfile2=$dir$base.u + tmpdepfile3=$dir$base.u + "$@" -M + fi + stat=$? + if test $stat -ne 0; then + rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" + exit $stat + fi + + for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" + do + test -f "$tmpdepfile" && break + done + aix_post_process_depfile + ;; + +tcc) + # tcc (Tiny C Compiler) understand '-MD -MF file' since version 0.9.26 + # FIXME: That version still under development at the moment of writing. + # Make that this statement remains true also for stable, released + # versions. + # It will wrap lines (doesn't matter whether long or short) with a + # trailing '\', as in: + # + # foo.o : \ + # foo.c \ + # foo.h \ + # + # It will put a trailing '\' even on the last line, and will use leading + # spaces rather than leading tabs (at least since its commit 0394caf7 + # "Emit spaces for -MD"). + "$@" -MD -MF "$tmpdepfile" + stat=$? + if test $stat -ne 0; then + rm -f "$tmpdepfile" + exit $stat + fi + rm -f "$depfile" + # Each non-empty line is of the form 'foo.o : \' or ' dep.h \'. 
+ # We have to change lines of the first kind to '$object: \'. + sed -e "s|.*:|$object :|" < "$tmpdepfile" > "$depfile" + # And for each line of the second kind, we have to emit a 'dep.h:' + # dummy dependency, to avoid the deleted-header problem. + sed -n -e 's|^ *\(.*\) *\\$|\1:|p' < "$tmpdepfile" >> "$depfile" + rm -f "$tmpdepfile" + ;; + +## The order of this option in the case statement is important, since the +## shell code in configure will try each of these formats in the order +## listed in this file. A plain '-MD' option would be understood by many +## compilers, so we must ensure this comes after the gcc and icc options. +pgcc) + # Portland's C compiler understands '-MD'. + # Will always output deps to 'file.d' where file is the root name of the + # source file under compilation, even if file resides in a subdirectory. + # The object file name does not affect the name of the '.d' file. + # pgcc 10.2 will output + # foo.o: sub/foo.c sub/foo.h + # and will wrap long lines using '\' : + # foo.o: sub/foo.c ... \ + # sub/foo.h ... \ + # ... + set_dir_from "$object" + # Use the source, not the object, to determine the base name, since + # that's sadly what pgcc will do too. + set_base_from "$source" + tmpdepfile=$base.d + + # For projects that build the same source file twice into different object + # files, the pgcc approach of using the *source* file root name can cause + # problems in parallel builds. Use a locking strategy to avoid stomping on + # the same $tmpdepfile. + lockdir=$base.d-lock + trap " + echo '$0: caught signal, cleaning up...' >&2 + rmdir '$lockdir' + exit 1 + " 1 2 13 15 + numtries=100 + i=$numtries + while test $i -gt 0; do + # mkdir is a portable test-and-set. + if mkdir "$lockdir" 2>/dev/null; then + # This process acquired the lock. + "$@" -MD + stat=$? + # Release the lock. + rmdir "$lockdir" + break + else + # If the lock is being held by a different process, wait + # until the winning process is done or we timeout. + while test -d "$lockdir" && test $i -gt 0; do + sleep 1 + i=`expr $i - 1` + done + fi + i=`expr $i - 1` + done + trap - 1 2 13 15 + if test $i -le 0; then + echo "$0: failed to acquire lock after $numtries attempts" >&2 + echo "$0: check lockdir '$lockdir'" >&2 + exit 1 + fi + + if test $stat -ne 0; then + rm -f "$tmpdepfile" + exit $stat + fi + rm -f "$depfile" + # Each line is of the form `foo.o: dependent.h', + # or `foo.o: dep1.h dep2.h \', or ` dep3.h dep4.h \'. + # Do two passes, one to just change these to + # `$object: dependent.h' and one to simply `dependent.h:'. + sed "s,^[^:]*:,$object :," < "$tmpdepfile" > "$depfile" + # Some versions of the HPUX 10.20 sed can't process this invocation + # correctly. Breaking it into two sed invocations is a workaround. + sed 's,^[^:]*: \(.*\)$,\1,;s/^\\$//;/^$/d;/:$/d' < "$tmpdepfile" \ + | sed -e 's/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +hp2) + # The "hp" stanza above does not work with aCC (C++) and HP's ia64 + # compilers, which have integrated preprocessors. The correct option + # to use with these is +Maked; it writes dependencies to a file named + # 'foo.d', which lands next to the object file, wherever that + # happens to be. + # Much of this is similar to the tru64 case; see comments there. + set_dir_from "$object" + set_base_from "$object" + if test "$libtool" = yes; then + tmpdepfile1=$dir$base.d + tmpdepfile2=$dir.libs/$base.d + "$@" -Wc,+Maked + else + tmpdepfile1=$dir$base.d + tmpdepfile2=$dir$base.d + "$@" +Maked + fi + stat=$? 
+ if test $stat -ne 0; then + rm -f "$tmpdepfile1" "$tmpdepfile2" + exit $stat + fi + + for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" + do + test -f "$tmpdepfile" && break + done + if test -f "$tmpdepfile"; then + sed -e "s,^.*\.[$lower]*:,$object:," "$tmpdepfile" > "$depfile" + # Add 'dependent.h:' lines. + sed -ne '2,${ + s/^ *// + s/ \\*$// + s/$/:/ + p + }' "$tmpdepfile" >> "$depfile" + else + make_dummy_depfile + fi + rm -f "$tmpdepfile" "$tmpdepfile2" + ;; + +tru64) + # The Tru64 compiler uses -MD to generate dependencies as a side + # effect. 'cc -MD -o foo.o ...' puts the dependencies into 'foo.o.d'. + # At least on Alpha/Redhat 6.1, Compaq CCC V6.2-504 seems to put + # dependencies in 'foo.d' instead, so we check for that too. + # Subdirectories are respected. + set_dir_from "$object" + set_base_from "$object" + + if test "$libtool" = yes; then + # Libtool generates 2 separate objects for the 2 libraries. These + # two compilations output dependencies in $dir.libs/$base.o.d and + # in $dir$base.o.d. We have to check for both files, because + # one of the two compilations can be disabled. We should prefer + # $dir$base.o.d over $dir.libs/$base.o.d because the latter is + # automatically cleaned when .libs/ is deleted, while ignoring + # the former would cause a distcleancheck panic. + tmpdepfile1=$dir$base.o.d # libtool 1.5 + tmpdepfile2=$dir.libs/$base.o.d # Likewise. + tmpdepfile3=$dir.libs/$base.d # Compaq CCC V6.2-504 + "$@" -Wc,-MD + else + tmpdepfile1=$dir$base.d + tmpdepfile2=$dir$base.d + tmpdepfile3=$dir$base.d + "$@" -MD + fi + + stat=$? + if test $stat -ne 0; then + rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" + exit $stat + fi + + for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" + do + test -f "$tmpdepfile" && break + done + # Same post-processing that is required for AIX mode. + aix_post_process_depfile + ;; + +msvc7) + if test "$libtool" = yes; then + showIncludes=-Wc,-showIncludes + else + showIncludes=-showIncludes + fi + "$@" $showIncludes > "$tmpdepfile" + stat=$? + grep -v '^Note: including file: ' "$tmpdepfile" + if test $stat -ne 0; then + rm -f "$tmpdepfile" + exit $stat + fi + rm -f "$depfile" + echo "$object : \\" > "$depfile" + # The first sed program below extracts the file names and escapes + # backslashes for cygpath. The second sed program outputs the file + # name when reading, but also accumulates all include files in the + # hold buffer in order to output them again at the end. This only + # works with sed implementations that can handle large buffers. + sed < "$tmpdepfile" -n ' +/^Note: including file: *\(.*\)/ { + s//\1/ + s/\\/\\\\/g + p +}' | $cygpath_u | sort -u | sed -n ' +s/ /\\ /g +s/\(.*\)/'"$tab"'\1 \\/p +s/.\(.*\) \\/\1:/ +H +$ { + s/.*/'"$tab"'/ + G + p +}' >> "$depfile" + echo >> "$depfile" # make sure the fragment doesn't end with a backslash + rm -f "$tmpdepfile" + ;; + +msvc7msys) + # This case exists only to let depend.m4 do its work. It works by + # looking at the text of this script. This case will never be run, + # since it is checked for above. + exit 1 + ;; + +#nosideeffect) + # This comment above is used by automake to tell side-effect + # dependency tracking mechanisms from slower ones. + +dashmstdout) + # Important note: in order to support this mode, a compiler *must* + # always write the preprocessed file to stdout, regardless of -o. + "$@" || exit $? + + # Remove the call to Libtool. 
+ if test "$libtool" = yes; then + while test "X$1" != 'X--mode=compile'; do + shift + done + shift + fi + + # Remove '-o $object'. + IFS=" " + for arg + do + case $arg in + -o) + shift + ;; + $object) + shift + ;; + *) + set fnord "$@" "$arg" + shift # fnord + shift # $arg + ;; + esac + done + + test -z "$dashmflag" && dashmflag=-M + # Require at least two characters before searching for ':' + # in the target name. This is to cope with DOS-style filenames: + # a dependency such as 'c:/foo/bar' could be seen as target 'c' otherwise. + "$@" $dashmflag | + sed "s|^[$tab ]*[^:$tab ][^:][^:]*:[$tab ]*|$object: |" > "$tmpdepfile" + rm -f "$depfile" + cat < "$tmpdepfile" > "$depfile" + # Some versions of the HPUX 10.20 sed can't process this sed invocation + # correctly. Breaking it into two sed invocations is a workaround. + tr ' ' "$nl" < "$tmpdepfile" \ + | sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' \ + | sed -e 's/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +dashXmstdout) + # This case only exists to satisfy depend.m4. It is never actually + # run, as this mode is specially recognized in the preamble. + exit 1 + ;; + +makedepend) + "$@" || exit $? + # Remove any Libtool call + if test "$libtool" = yes; then + while test "X$1" != 'X--mode=compile'; do + shift + done + shift + fi + # X makedepend + shift + cleared=no eat=no + for arg + do + case $cleared in + no) + set ""; shift + cleared=yes ;; + esac + if test $eat = yes; then + eat=no + continue + fi + case "$arg" in + -D*|-I*) + set fnord "$@" "$arg"; shift ;; + # Strip any option that makedepend may not understand. Remove + # the object too, otherwise makedepend will parse it as a source file. + -arch) + eat=yes ;; + -*|$object) + ;; + *) + set fnord "$@" "$arg"; shift ;; + esac + done + obj_suffix=`echo "$object" | sed 's/^.*\././'` + touch "$tmpdepfile" + ${MAKEDEPEND-makedepend} -o"$obj_suffix" -f"$tmpdepfile" "$@" + rm -f "$depfile" + # makedepend may prepend the VPATH from the source file name to the object. + # No need to regex-escape $object, excess matching of '.' is harmless. + sed "s|^.*\($object *:\)|\1|" "$tmpdepfile" > "$depfile" + # Some versions of the HPUX 10.20 sed can't process the last invocation + # correctly. Breaking it into two sed invocations is a workaround. + sed '1,2d' "$tmpdepfile" \ + | tr ' ' "$nl" \ + | sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' \ + | sed -e 's/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" "$tmpdepfile".bak + ;; + +cpp) + # Important note: in order to support this mode, a compiler *must* + # always write the preprocessed file to stdout. + "$@" || exit $? + + # Remove the call to Libtool. + if test "$libtool" = yes; then + while test "X$1" != 'X--mode=compile'; do + shift + done + shift + fi + + # Remove '-o $object'. + IFS=" " + for arg + do + case $arg in + -o) + shift + ;; + $object) + shift + ;; + *) + set fnord "$@" "$arg" + shift # fnord + shift # $arg + ;; + esac + done + + "$@" -E \ + | sed -n -e '/^# [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \ + -e '/^#line [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \ + | sed '$ s: \\$::' > "$tmpdepfile" + rm -f "$depfile" + echo "$object : \\" > "$depfile" + cat < "$tmpdepfile" >> "$depfile" + sed < "$tmpdepfile" '/^$/d;s/^ //;s/ \\$//;s/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +msvisualcpp) + # Important note: in order to support this mode, a compiler *must* + # always write the preprocessed file to stdout. + "$@" || exit $? + + # Remove the call to Libtool. 
+ if test "$libtool" = yes; then + while test "X$1" != 'X--mode=compile'; do + shift + done + shift + fi + + IFS=" " + for arg + do + case "$arg" in + -o) + shift + ;; + $object) + shift + ;; + "-Gm"|"/Gm"|"-Gi"|"/Gi"|"-ZI"|"/ZI") + set fnord "$@" + shift + shift + ;; + *) + set fnord "$@" "$arg" + shift + shift + ;; + esac + done + "$@" -E 2>/dev/null | + sed -n '/^#line [0-9][0-9]* "\([^"]*\)"/ s::\1:p' | $cygpath_u | sort -u > "$tmpdepfile" + rm -f "$depfile" + echo "$object : \\" > "$depfile" + sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::'"$tab"'\1 \\:p' >> "$depfile" + echo "$tab" >> "$depfile" + sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::\1\::p' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +msvcmsys) + # This case exists only to let depend.m4 do its work. It works by + # looking at the text of this script. This case will never be run, + # since it is checked for above. + exit 1 + ;; + +none) + exec "$@" + ;; + +*) + echo "Unknown depmode $depmode" 1>&2 + exit 1 + ;; +esac + +exit 0 + +# Local Variables: +# mode: shell-script +# sh-indentation: 2 +# eval: (add-hook 'before-save-hook 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC0" +# time-stamp-end: "; # UTC" +# End: diff --git a/pkg/enet/enet/enet.dsp b/pkg/enet/enet/enet.dsp new file mode 100644 index 000000000..285c020cb --- /dev/null +++ b/pkg/enet/enet/enet.dsp @@ -0,0 +1,168 @@ +# Microsoft Developer Studio Project File - Name="enet" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Static Library" 0x0104 + +CFG=enet - Win32 Debug +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "enet.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. 
For example: +!MESSAGE +!MESSAGE NMAKE /f "enet.mak" CFG="enet - Win32 Debug" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "enet - Win32 Release" (based on "Win32 (x86) Static Library") +!MESSAGE "enet - Win32 Debug" (based on "Win32 (x86) Static Library") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "enet - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Target_Dir "" +MTL=midl.exe +# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_MBCS" /D "_LIB" /YX /FD /c +# ADD CPP /nologo /W3 /O2 /I "include" /D "WIN32" /D "NDEBUG" /D "_MBCS" /D "_LIB" /FD /c +# SUBTRACT CPP /YX +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LIB32=link.exe -lib +# ADD BASE LIB32 /nologo +# ADD LIB32 /nologo + +!ELSEIF "$(CFG)" == "enet - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Target_Dir "" +MTL=midl.exe +# ADD BASE CPP /nologo /W3 /Gm /GX /ZI /Od /D "WIN32" /D "_DEBUG" /D "_MBCS" /D "_LIB" /YX /FD /GZ /c +# ADD CPP /nologo /G6 /MTd /W3 /ZI /Od /I "include" /D "WIN32" /D "_DEBUG" /D "_MBCS" /D "_LIB" /FR /FD /GZ /c +# SUBTRACT CPP /YX +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LIB32=link.exe -lib +# ADD BASE LIB32 /nologo +# ADD LIB32 /nologo + +!ENDIF + +# Begin Target + +# Name "enet - Win32 Release" +# Name "enet - Win32 Debug" +# Begin Group "Source Files" + +# PROP Default_Filter "cpp;c;cxx;rc;def;r;odl;idl;hpj;bat" +# Begin Source File + +SOURCE=.\host.c +# End Source File +# Begin Source File + +SOURCE=.\list.c +# End Source File +# Begin Source File + +SOURCE=.\callbacks.c +# End Source File +# Begin Source File + +SOURCE=.\compress.c +# End Source File +# Begin Source File + +SOURCE=.\packet.c +# End Source File +# Begin Source File + +SOURCE=.\peer.c +# End Source File +# Begin Source File + +SOURCE=.\protocol.c +# End Source File +# Begin Source File + +SOURCE=.\unix.c +# End Source File +# Begin Source File + +SOURCE=.\win32.c +# End Source File +# End Group +# Begin Group "Header Files" + +# PROP Default_Filter "h;hpp;hxx;hm;inl" +# Begin Source File + +SOURCE=.\include\enet\enet.h +# End Source File +# Begin Source File + +SOURCE=.\include\enet\list.h +# End Source File +# Begin Source File + +SOURCE=.\include\enet\callbacks.h +# End Source File +# Begin Source File + +SOURCE=.\include\enet\protocol.h +# End Source File +# Begin Source File + +SOURCE=.\include\enet\time.h +# End Source File +# Begin Source File + +SOURCE=.\include\enet\types.h +# End Source File +# Begin Source File + +SOURCE=.\include\enet\unix.h +# End Source File +# Begin Source File + +SOURCE=.\include\enet\utility.h +# End Source File +# Begin Source File + +SOURCE=.\include\enet\win32.h +# End Source File +# End Group +# End Target +# End Project diff --git a/pkg/enet/enet/enet_dll.cbp 
b/pkg/enet/enet/enet_dll.cbp new file mode 100644 index 000000000..961274c76 --- /dev/null +++ b/pkg/enet/enet/enet_dll.cbp @@ -0,0 +1,86 @@ + + + + + + diff --git a/pkg/enet/enet/host.c b/pkg/enet/enet/host.c new file mode 100644 index 000000000..fff946a39 --- /dev/null +++ b/pkg/enet/enet/host.c @@ -0,0 +1,503 @@ +/** + @file host.c + @brief ENet host management functions +*/ +#define ENET_BUILDING_LIB 1 +#include +#include "enet/enet.h" + +/** @defgroup host ENet host functions + @{ +*/ + +/** Creates a host for communicating to peers. + + @param address the address at which other peers may connect to this host. If NULL, then no peers may connect to the host. + @param peerCount the maximum number of peers that should be allocated for the host. + @param channelLimit the maximum number of channels allowed; if 0, then this is equivalent to ENET_PROTOCOL_MAXIMUM_CHANNEL_COUNT + @param incomingBandwidth downstream bandwidth of the host in bytes/second; if 0, ENet will assume unlimited bandwidth. + @param outgoingBandwidth upstream bandwidth of the host in bytes/second; if 0, ENet will assume unlimited bandwidth. + + @returns the host on success and NULL on failure + + @remarks ENet will strategically drop packets on specific sides of a connection between hosts + to ensure the host's bandwidth is not overwhelmed. The bandwidth parameters also determine + the window size of a connection which limits the amount of reliable packets that may be in transit + at any given time. +*/ +ENetHost * +enet_host_create (const ENetAddress * address, size_t peerCount, size_t channelLimit, enet_uint32 incomingBandwidth, enet_uint32 outgoingBandwidth) +{ + ENetHost * host; + ENetPeer * currentPeer; + + if (peerCount > ENET_PROTOCOL_MAXIMUM_PEER_ID) + return NULL; + + host = (ENetHost *) enet_malloc (sizeof (ENetHost)); + if (host == NULL) + return NULL; + memset (host, 0, sizeof (ENetHost)); + + host -> peers = (ENetPeer *) enet_malloc (peerCount * sizeof (ENetPeer)); + if (host -> peers == NULL) + { + enet_free (host); + + return NULL; + } + memset (host -> peers, 0, peerCount * sizeof (ENetPeer)); + + host -> socket = enet_socket_create (ENET_SOCKET_TYPE_DATAGRAM); + if (host -> socket == ENET_SOCKET_NULL || (address != NULL && enet_socket_bind (host -> socket, address) < 0)) + { + if (host -> socket != ENET_SOCKET_NULL) + enet_socket_destroy (host -> socket); + + enet_free (host -> peers); + enet_free (host); + + return NULL; + } + + enet_socket_set_option (host -> socket, ENET_SOCKOPT_NONBLOCK, 1); + enet_socket_set_option (host -> socket, ENET_SOCKOPT_BROADCAST, 1); + enet_socket_set_option (host -> socket, ENET_SOCKOPT_RCVBUF, ENET_HOST_RECEIVE_BUFFER_SIZE); + enet_socket_set_option (host -> socket, ENET_SOCKOPT_SNDBUF, ENET_HOST_SEND_BUFFER_SIZE); + + if (address != NULL && enet_socket_get_address (host -> socket, & host -> address) < 0) + host -> address = * address; + + if (! 
channelLimit || channelLimit > ENET_PROTOCOL_MAXIMUM_CHANNEL_COUNT) + channelLimit = ENET_PROTOCOL_MAXIMUM_CHANNEL_COUNT; + else + if (channelLimit < ENET_PROTOCOL_MINIMUM_CHANNEL_COUNT) + channelLimit = ENET_PROTOCOL_MINIMUM_CHANNEL_COUNT; + + host -> randomSeed = (enet_uint32) (size_t) host; + host -> randomSeed += enet_host_random_seed (); + host -> randomSeed = (host -> randomSeed << 16) | (host -> randomSeed >> 16); + host -> channelLimit = channelLimit; + host -> incomingBandwidth = incomingBandwidth; + host -> outgoingBandwidth = outgoingBandwidth; + host -> bandwidthThrottleEpoch = 0; + host -> recalculateBandwidthLimits = 0; + host -> mtu = ENET_HOST_DEFAULT_MTU; + host -> peerCount = peerCount; + host -> commandCount = 0; + host -> bufferCount = 0; + host -> checksum = NULL; + host -> receivedAddress.host = ENET_HOST_ANY; + host -> receivedAddress.port = 0; + host -> receivedData = NULL; + host -> receivedDataLength = 0; + + host -> totalSentData = 0; + host -> totalSentPackets = 0; + host -> totalReceivedData = 0; + host -> totalReceivedPackets = 0; + host -> totalQueued = 0; + + host -> connectedPeers = 0; + host -> bandwidthLimitedPeers = 0; + host -> duplicatePeers = ENET_PROTOCOL_MAXIMUM_PEER_ID; + host -> maximumPacketSize = ENET_HOST_DEFAULT_MAXIMUM_PACKET_SIZE; + host -> maximumWaitingData = ENET_HOST_DEFAULT_MAXIMUM_WAITING_DATA; + + host -> compressor.context = NULL; + host -> compressor.compress = NULL; + host -> compressor.decompress = NULL; + host -> compressor.destroy = NULL; + + host -> intercept = NULL; + + enet_list_clear (& host -> dispatchQueue); + + for (currentPeer = host -> peers; + currentPeer < & host -> peers [host -> peerCount]; + ++ currentPeer) + { + currentPeer -> host = host; + currentPeer -> incomingPeerID = currentPeer - host -> peers; + currentPeer -> outgoingSessionID = currentPeer -> incomingSessionID = 0xFF; + currentPeer -> data = NULL; + + enet_list_clear (& currentPeer -> acknowledgements); + enet_list_clear (& currentPeer -> sentReliableCommands); + enet_list_clear (& currentPeer -> outgoingCommands); + enet_list_clear (& currentPeer -> outgoingSendReliableCommands); + enet_list_clear (& currentPeer -> dispatchedCommands); + + enet_peer_reset (currentPeer); + } + + return host; +} + +/** Destroys the host and all resources associated with it. + @param host pointer to the host to destroy +*/ +void +enet_host_destroy (ENetHost * host) +{ + ENetPeer * currentPeer; + + if (host == NULL) + return; + + enet_socket_destroy (host -> socket); + + for (currentPeer = host -> peers; + currentPeer < & host -> peers [host -> peerCount]; + ++ currentPeer) + { + enet_peer_reset (currentPeer); + } + + if (host -> compressor.context != NULL && host -> compressor.destroy) + (* host -> compressor.destroy) (host -> compressor.context); + + enet_free (host -> peers); + enet_free (host); +} + +enet_uint32 +enet_host_random (ENetHost * host) +{ + /* Mulberry32 by Tommy Ettinger */ + enet_uint32 n = (host -> randomSeed += 0x6D2B79F5U); + n = (n ^ (n >> 15)) * (n | 1U); + n ^= n + (n ^ (n >> 7)) * (n | 61U); + return n ^ (n >> 14); +} + +/** Initiates a connection to a foreign host. 
+ @param host host seeking the connection + @param address destination for the connection + @param channelCount number of channels to allocate + @param data user data supplied to the receiving host + @returns a peer representing the foreign host on success, NULL on failure + @remarks The peer returned will have not completed the connection until enet_host_service() + notifies of an ENET_EVENT_TYPE_CONNECT event for the peer. +*/ +ENetPeer * +enet_host_connect (ENetHost * host, const ENetAddress * address, size_t channelCount, enet_uint32 data) +{ + ENetPeer * currentPeer; + ENetChannel * channel; + ENetProtocol command; + + if (channelCount < ENET_PROTOCOL_MINIMUM_CHANNEL_COUNT) + channelCount = ENET_PROTOCOL_MINIMUM_CHANNEL_COUNT; + else + if (channelCount > ENET_PROTOCOL_MAXIMUM_CHANNEL_COUNT) + channelCount = ENET_PROTOCOL_MAXIMUM_CHANNEL_COUNT; + + for (currentPeer = host -> peers; + currentPeer < & host -> peers [host -> peerCount]; + ++ currentPeer) + { + if (currentPeer -> state == ENET_PEER_STATE_DISCONNECTED) + break; + } + + if (currentPeer >= & host -> peers [host -> peerCount]) + return NULL; + + currentPeer -> channels = (ENetChannel *) enet_malloc (channelCount * sizeof (ENetChannel)); + if (currentPeer -> channels == NULL) + return NULL; + currentPeer -> channelCount = channelCount; + currentPeer -> state = ENET_PEER_STATE_CONNECTING; + currentPeer -> address = * address; + currentPeer -> connectID = enet_host_random (host); + currentPeer -> mtu = host -> mtu; + + if (host -> outgoingBandwidth == 0) + currentPeer -> windowSize = ENET_PROTOCOL_MAXIMUM_WINDOW_SIZE; + else + currentPeer -> windowSize = (host -> outgoingBandwidth / + ENET_PEER_WINDOW_SIZE_SCALE) * + ENET_PROTOCOL_MINIMUM_WINDOW_SIZE; + + if (currentPeer -> windowSize < ENET_PROTOCOL_MINIMUM_WINDOW_SIZE) + currentPeer -> windowSize = ENET_PROTOCOL_MINIMUM_WINDOW_SIZE; + else + if (currentPeer -> windowSize > ENET_PROTOCOL_MAXIMUM_WINDOW_SIZE) + currentPeer -> windowSize = ENET_PROTOCOL_MAXIMUM_WINDOW_SIZE; + + for (channel = currentPeer -> channels; + channel < & currentPeer -> channels [channelCount]; + ++ channel) + { + channel -> outgoingReliableSequenceNumber = 0; + channel -> outgoingUnreliableSequenceNumber = 0; + channel -> incomingReliableSequenceNumber = 0; + channel -> incomingUnreliableSequenceNumber = 0; + + enet_list_clear (& channel -> incomingReliableCommands); + enet_list_clear (& channel -> incomingUnreliableCommands); + + channel -> usedReliableWindows = 0; + memset (channel -> reliableWindows, 0, sizeof (channel -> reliableWindows)); + } + + command.header.command = ENET_PROTOCOL_COMMAND_CONNECT | ENET_PROTOCOL_COMMAND_FLAG_ACKNOWLEDGE; + command.header.channelID = 0xFF; + command.connect.outgoingPeerID = ENET_HOST_TO_NET_16 (currentPeer -> incomingPeerID); + command.connect.incomingSessionID = currentPeer -> incomingSessionID; + command.connect.outgoingSessionID = currentPeer -> outgoingSessionID; + command.connect.mtu = ENET_HOST_TO_NET_32 (currentPeer -> mtu); + command.connect.windowSize = ENET_HOST_TO_NET_32 (currentPeer -> windowSize); + command.connect.channelCount = ENET_HOST_TO_NET_32 (channelCount); + command.connect.incomingBandwidth = ENET_HOST_TO_NET_32 (host -> incomingBandwidth); + command.connect.outgoingBandwidth = ENET_HOST_TO_NET_32 (host -> outgoingBandwidth); + command.connect.packetThrottleInterval = ENET_HOST_TO_NET_32 (currentPeer -> packetThrottleInterval); + command.connect.packetThrottleAcceleration = ENET_HOST_TO_NET_32 (currentPeer -> packetThrottleAcceleration); + 
command.connect.packetThrottleDeceleration = ENET_HOST_TO_NET_32 (currentPeer -> packetThrottleDeceleration); + command.connect.connectID = currentPeer -> connectID; + command.connect.data = ENET_HOST_TO_NET_32 (data); + + enet_peer_queue_outgoing_command (currentPeer, & command, NULL, 0, 0); + + return currentPeer; +} + +/** Queues a packet to be sent to all peers associated with the host. + @param host host on which to broadcast the packet + @param channelID channel on which to broadcast + @param packet packet to broadcast +*/ +void +enet_host_broadcast (ENetHost * host, enet_uint8 channelID, ENetPacket * packet) +{ + ENetPeer * currentPeer; + + for (currentPeer = host -> peers; + currentPeer < & host -> peers [host -> peerCount]; + ++ currentPeer) + { + if (currentPeer -> state != ENET_PEER_STATE_CONNECTED) + continue; + + enet_peer_send (currentPeer, channelID, packet); + } + + if (packet -> referenceCount == 0) + enet_packet_destroy (packet); +} + +/** Sets the packet compressor the host should use to compress and decompress packets. + @param host host to enable or disable compression for + @param compressor callbacks for for the packet compressor; if NULL, then compression is disabled +*/ +void +enet_host_compress (ENetHost * host, const ENetCompressor * compressor) +{ + if (host -> compressor.context != NULL && host -> compressor.destroy) + (* host -> compressor.destroy) (host -> compressor.context); + + if (compressor) + host -> compressor = * compressor; + else + host -> compressor.context = NULL; +} + +/** Limits the maximum allowed channels of future incoming connections. + @param host host to limit + @param channelLimit the maximum number of channels allowed; if 0, then this is equivalent to ENET_PROTOCOL_MAXIMUM_CHANNEL_COUNT +*/ +void +enet_host_channel_limit (ENetHost * host, size_t channelLimit) +{ + if (! channelLimit || channelLimit > ENET_PROTOCOL_MAXIMUM_CHANNEL_COUNT) + channelLimit = ENET_PROTOCOL_MAXIMUM_CHANNEL_COUNT; + else + if (channelLimit < ENET_PROTOCOL_MINIMUM_CHANNEL_COUNT) + channelLimit = ENET_PROTOCOL_MINIMUM_CHANNEL_COUNT; + + host -> channelLimit = channelLimit; +} + + +/** Adjusts the bandwidth limits of a host. + @param host host to adjust + @param incomingBandwidth new incoming bandwidth + @param outgoingBandwidth new outgoing bandwidth + @remarks the incoming and outgoing bandwidth parameters are identical in function to those + specified in enet_host_create(). +*/ +void +enet_host_bandwidth_limit (ENetHost * host, enet_uint32 incomingBandwidth, enet_uint32 outgoingBandwidth) +{ + host -> incomingBandwidth = incomingBandwidth; + host -> outgoingBandwidth = outgoingBandwidth; + host -> recalculateBandwidthLimits = 1; +} + +void +enet_host_bandwidth_throttle (ENetHost * host) +{ + enet_uint32 timeCurrent = enet_time_get (), + elapsedTime = timeCurrent - host -> bandwidthThrottleEpoch, + peersRemaining = (enet_uint32) host -> connectedPeers, + dataTotal = ~0, + bandwidth = ~0, + throttle = 0, + bandwidthLimit = 0; + int needsAdjustment = host -> bandwidthLimitedPeers > 0 ? 
1 : 0; + ENetPeer * peer; + ENetProtocol command; + + if (elapsedTime < ENET_HOST_BANDWIDTH_THROTTLE_INTERVAL) + return; + + host -> bandwidthThrottleEpoch = timeCurrent; + + if (peersRemaining == 0) + return; + + if (host -> outgoingBandwidth != 0) + { + dataTotal = 0; + bandwidth = (host -> outgoingBandwidth * elapsedTime) / 1000; + + for (peer = host -> peers; + peer < & host -> peers [host -> peerCount]; + ++ peer) + { + if (peer -> state != ENET_PEER_STATE_CONNECTED && peer -> state != ENET_PEER_STATE_DISCONNECT_LATER) + continue; + + dataTotal += peer -> outgoingDataTotal; + } + } + + while (peersRemaining > 0 && needsAdjustment != 0) + { + needsAdjustment = 0; + + if (dataTotal <= bandwidth) + throttle = ENET_PEER_PACKET_THROTTLE_SCALE; + else + throttle = (bandwidth * ENET_PEER_PACKET_THROTTLE_SCALE) / dataTotal; + + for (peer = host -> peers; + peer < & host -> peers [host -> peerCount]; + ++ peer) + { + enet_uint32 peerBandwidth; + + if ((peer -> state != ENET_PEER_STATE_CONNECTED && peer -> state != ENET_PEER_STATE_DISCONNECT_LATER) || + peer -> incomingBandwidth == 0 || + peer -> outgoingBandwidthThrottleEpoch == timeCurrent) + continue; + + peerBandwidth = (peer -> incomingBandwidth * elapsedTime) / 1000; + if ((throttle * peer -> outgoingDataTotal) / ENET_PEER_PACKET_THROTTLE_SCALE <= peerBandwidth) + continue; + + peer -> packetThrottleLimit = (peerBandwidth * + ENET_PEER_PACKET_THROTTLE_SCALE) / peer -> outgoingDataTotal; + + if (peer -> packetThrottleLimit == 0) + peer -> packetThrottleLimit = 1; + + if (peer -> packetThrottle > peer -> packetThrottleLimit) + peer -> packetThrottle = peer -> packetThrottleLimit; + + peer -> outgoingBandwidthThrottleEpoch = timeCurrent; + + peer -> incomingDataTotal = 0; + peer -> outgoingDataTotal = 0; + + needsAdjustment = 1; + -- peersRemaining; + bandwidth -= peerBandwidth; + dataTotal -= peerBandwidth; + } + } + + if (peersRemaining > 0) + { + if (dataTotal <= bandwidth) + throttle = ENET_PEER_PACKET_THROTTLE_SCALE; + else + throttle = (bandwidth * ENET_PEER_PACKET_THROTTLE_SCALE) / dataTotal; + + for (peer = host -> peers; + peer < & host -> peers [host -> peerCount]; + ++ peer) + { + if ((peer -> state != ENET_PEER_STATE_CONNECTED && peer -> state != ENET_PEER_STATE_DISCONNECT_LATER) || + peer -> outgoingBandwidthThrottleEpoch == timeCurrent) + continue; + + peer -> packetThrottleLimit = throttle; + + if (peer -> packetThrottle > peer -> packetThrottleLimit) + peer -> packetThrottle = peer -> packetThrottleLimit; + + peer -> incomingDataTotal = 0; + peer -> outgoingDataTotal = 0; + } + } + + if (host -> recalculateBandwidthLimits) + { + host -> recalculateBandwidthLimits = 0; + + peersRemaining = (enet_uint32) host -> connectedPeers; + bandwidth = host -> incomingBandwidth; + needsAdjustment = 1; + + if (bandwidth == 0) + bandwidthLimit = 0; + else + while (peersRemaining > 0 && needsAdjustment != 0) + { + needsAdjustment = 0; + bandwidthLimit = bandwidth / peersRemaining; + + for (peer = host -> peers; + peer < & host -> peers [host -> peerCount]; + ++ peer) + { + if ((peer -> state != ENET_PEER_STATE_CONNECTED && peer -> state != ENET_PEER_STATE_DISCONNECT_LATER) || + peer -> incomingBandwidthThrottleEpoch == timeCurrent) + continue; + + if (peer -> outgoingBandwidth > 0 && + peer -> outgoingBandwidth >= bandwidthLimit) + continue; + + peer -> incomingBandwidthThrottleEpoch = timeCurrent; + + needsAdjustment = 1; + -- peersRemaining; + bandwidth -= peer -> outgoingBandwidth; + } + } + + for (peer = host -> peers; + peer < & host -> 
peers [host -> peerCount]; + ++ peer) + { + if (peer -> state != ENET_PEER_STATE_CONNECTED && peer -> state != ENET_PEER_STATE_DISCONNECT_LATER) + continue; + + command.header.command = ENET_PROTOCOL_COMMAND_BANDWIDTH_LIMIT | ENET_PROTOCOL_COMMAND_FLAG_ACKNOWLEDGE; + command.header.channelID = 0xFF; + command.bandwidthLimit.outgoingBandwidth = ENET_HOST_TO_NET_32 (host -> outgoingBandwidth); + + if (peer -> incomingBandwidthThrottleEpoch == timeCurrent) + command.bandwidthLimit.incomingBandwidth = ENET_HOST_TO_NET_32 (peer -> outgoingBandwidth); + else + command.bandwidthLimit.incomingBandwidth = ENET_HOST_TO_NET_32 (bandwidthLimit); + + enet_peer_queue_outgoing_command (peer, & command, NULL, 0, 0); + } + } +} + +/** @} */ diff --git a/pkg/enet/enet/include/enet/callbacks.h b/pkg/enet/enet/include/enet/callbacks.h new file mode 100644 index 000000000..78a85d3c8 --- /dev/null +++ b/pkg/enet/enet/include/enet/callbacks.h @@ -0,0 +1,37 @@ +/** + @file callbacks.h + @brief ENet callbacks +*/ +#ifndef __ENET_CALLBACKS_H__ +#define __ENET_CALLBACKS_H__ + +#include + +typedef struct _ENetCallbacks +{ + void * (ENET_CALLBACK * malloc) (size_t size); + void (ENET_CALLBACK * free) (void * memory); + void (ENET_CALLBACK * no_memory) (void); +} ENetCallbacks; + +#ifdef __cplusplus +extern "C" +{ +#endif + +/** @defgroup callbacks ENet internal callbacks + @{ + @ingroup private +*/ + +extern void * enet_malloc (size_t); +extern void enet_free (void *); + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif /* __ENET_CALLBACKS_H__ */ + diff --git a/pkg/enet/enet/include/enet/enet.h b/pkg/enet/enet/include/enet/enet.h new file mode 100644 index 000000000..dfa2c7036 --- /dev/null +++ b/pkg/enet/enet/include/enet/enet.h @@ -0,0 +1,615 @@ +/** + @file enet.h + @brief ENet public header file +*/ +#ifndef __ENET_ENET_H__ +#define __ENET_ENET_H__ + +#include + +#ifdef _WIN32 +#include "enet/win32.h" +#else +#include "enet/unix.h" +#endif + +#include "enet/types.h" +#include "enet/protocol.h" +#include "enet/list.h" +#include "enet/callbacks.h" + +#define ENET_VERSION_MAJOR 1 +#define ENET_VERSION_MINOR 3 +#define ENET_VERSION_PATCH 18 +#define ENET_VERSION_CREATE(major, minor, patch) (((major)<<16) | ((minor)<<8) | (patch)) +#define ENET_VERSION_GET_MAJOR(version) (((version)>>16)&0xFF) +#define ENET_VERSION_GET_MINOR(version) (((version)>>8)&0xFF) +#define ENET_VERSION_GET_PATCH(version) ((version)&0xFF) +#define ENET_VERSION ENET_VERSION_CREATE(ENET_VERSION_MAJOR, ENET_VERSION_MINOR, ENET_VERSION_PATCH) + +typedef enet_uint32 ENetVersion; + +struct _ENetHost; +struct _ENetEvent; +struct _ENetPacket; + +typedef enum _ENetSocketType +{ + ENET_SOCKET_TYPE_STREAM = 1, + ENET_SOCKET_TYPE_DATAGRAM = 2 +} ENetSocketType; + +typedef enum _ENetSocketWait +{ + ENET_SOCKET_WAIT_NONE = 0, + ENET_SOCKET_WAIT_SEND = (1 << 0), + ENET_SOCKET_WAIT_RECEIVE = (1 << 1), + ENET_SOCKET_WAIT_INTERRUPT = (1 << 2) +} ENetSocketWait; + +typedef enum _ENetSocketOption +{ + ENET_SOCKOPT_NONBLOCK = 1, + ENET_SOCKOPT_BROADCAST = 2, + ENET_SOCKOPT_RCVBUF = 3, + ENET_SOCKOPT_SNDBUF = 4, + ENET_SOCKOPT_REUSEADDR = 5, + ENET_SOCKOPT_RCVTIMEO = 6, + ENET_SOCKOPT_SNDTIMEO = 7, + ENET_SOCKOPT_ERROR = 8, + ENET_SOCKOPT_NODELAY = 9, + ENET_SOCKOPT_TTL = 10 +} ENetSocketOption; + +typedef enum _ENetSocketShutdown +{ + ENET_SOCKET_SHUTDOWN_READ = 0, + ENET_SOCKET_SHUTDOWN_WRITE = 1, + ENET_SOCKET_SHUTDOWN_READ_WRITE = 2 +} ENetSocketShutdown; + +#define ENET_HOST_ANY 0 +#define ENET_HOST_BROADCAST 0xFFFFFFFFU +#define ENET_PORT_ANY 0 + +/** + * 
Portable internet address structure. + * + * The host must be specified in network byte-order, and the port must be in host + * byte-order. The constant ENET_HOST_ANY may be used to specify the default + * server host. The constant ENET_HOST_BROADCAST may be used to specify the + * broadcast address (255.255.255.255). This makes sense for enet_host_connect, + * but not for enet_host_create. Once a server responds to a broadcast, the + * address is updated from ENET_HOST_BROADCAST to the server's actual IP address. + */ +typedef struct _ENetAddress +{ + enet_uint32 host; + enet_uint16 port; +} ENetAddress; + +/** + * Packet flag bit constants. + * + * The host must be specified in network byte-order, and the port must be in + * host byte-order. The constant ENET_HOST_ANY may be used to specify the + * default server host. + + @sa ENetPacket +*/ +typedef enum _ENetPacketFlag +{ + /** packet must be received by the target peer and resend attempts should be + * made until the packet is delivered */ + ENET_PACKET_FLAG_RELIABLE = (1 << 0), + /** packet will not be sequenced with other packets + */ + ENET_PACKET_FLAG_UNSEQUENCED = (1 << 1), + /** packet will not allocate data, and user must supply it instead */ + ENET_PACKET_FLAG_NO_ALLOCATE = (1 << 2), + /** packet will be fragmented using unreliable (instead of reliable) sends + * if it exceeds the MTU */ + ENET_PACKET_FLAG_UNRELIABLE_FRAGMENT = (1 << 3), + + /** whether the packet has been sent from all queues it has been entered into */ + ENET_PACKET_FLAG_SENT = (1<<8) +} ENetPacketFlag; + +typedef void (ENET_CALLBACK * ENetPacketFreeCallback) (struct _ENetPacket *); + +/** + * ENet packet structure. + * + * An ENet data packet that may be sent to or received from a peer. The shown + * fields should only be read and never modified. The data field contains the + * allocated data for the packet. The dataLength fields specifies the length + * of the allocated data. 
The flags field is either 0 (specifying no flags), + * or a bitwise-or of any combination of the following flags: + * + * ENET_PACKET_FLAG_RELIABLE - packet must be received by the target peer + * and resend attempts should be made until the packet is delivered + * + * ENET_PACKET_FLAG_UNSEQUENCED - packet will not be sequenced with other packets + * (not supported for reliable packets) + * + * ENET_PACKET_FLAG_NO_ALLOCATE - packet will not allocate data, and user must supply it instead + * + * ENET_PACKET_FLAG_UNRELIABLE_FRAGMENT - packet will be fragmented using unreliable + * (instead of reliable) sends if it exceeds the MTU + * + * ENET_PACKET_FLAG_SENT - whether the packet has been sent from all queues it has been entered into + @sa ENetPacketFlag + */ +typedef struct _ENetPacket +{ + size_t referenceCount; /**< internal use only */ + enet_uint32 flags; /**< bitwise-or of ENetPacketFlag constants */ + enet_uint8 * data; /**< allocated data for packet */ + size_t dataLength; /**< length of data */ + ENetPacketFreeCallback freeCallback; /**< function to be called when the packet is no longer in use */ + void * userData; /**< application private data, may be freely modified */ +} ENetPacket; + +typedef struct _ENetAcknowledgement +{ + ENetListNode acknowledgementList; + enet_uint32 sentTime; + ENetProtocol command; +} ENetAcknowledgement; + +typedef struct _ENetOutgoingCommand +{ + ENetListNode outgoingCommandList; + enet_uint16 reliableSequenceNumber; + enet_uint16 unreliableSequenceNumber; + enet_uint32 sentTime; + enet_uint32 roundTripTimeout; + enet_uint32 queueTime; + enet_uint32 fragmentOffset; + enet_uint16 fragmentLength; + enet_uint16 sendAttempts; + ENetProtocol command; + ENetPacket * packet; +} ENetOutgoingCommand; + +typedef struct _ENetIncomingCommand +{ + ENetListNode incomingCommandList; + enet_uint16 reliableSequenceNumber; + enet_uint16 unreliableSequenceNumber; + ENetProtocol command; + enet_uint32 fragmentCount; + enet_uint32 fragmentsRemaining; + enet_uint32 * fragments; + ENetPacket * packet; +} ENetIncomingCommand; + +typedef enum _ENetPeerState +{ + ENET_PEER_STATE_DISCONNECTED = 0, + ENET_PEER_STATE_CONNECTING = 1, + ENET_PEER_STATE_ACKNOWLEDGING_CONNECT = 2, + ENET_PEER_STATE_CONNECTION_PENDING = 3, + ENET_PEER_STATE_CONNECTION_SUCCEEDED = 4, + ENET_PEER_STATE_CONNECTED = 5, + ENET_PEER_STATE_DISCONNECT_LATER = 6, + ENET_PEER_STATE_DISCONNECTING = 7, + ENET_PEER_STATE_ACKNOWLEDGING_DISCONNECT = 8, + ENET_PEER_STATE_ZOMBIE = 9 +} ENetPeerState; + +#ifndef ENET_BUFFER_MAXIMUM +#define ENET_BUFFER_MAXIMUM (1 + 2 * ENET_PROTOCOL_MAXIMUM_PACKET_COMMANDS) +#endif + +enum +{ + ENET_HOST_RECEIVE_BUFFER_SIZE = 256 * 1024, + ENET_HOST_SEND_BUFFER_SIZE = 256 * 1024, + ENET_HOST_BANDWIDTH_THROTTLE_INTERVAL = 1000, + ENET_HOST_DEFAULT_MTU = 1392, + ENET_HOST_DEFAULT_MAXIMUM_PACKET_SIZE = 32 * 1024 * 1024, + ENET_HOST_DEFAULT_MAXIMUM_WAITING_DATA = 32 * 1024 * 1024, + + ENET_PEER_DEFAULT_ROUND_TRIP_TIME = 500, + ENET_PEER_DEFAULT_PACKET_THROTTLE = 32, + ENET_PEER_PACKET_THROTTLE_SCALE = 32, + ENET_PEER_PACKET_THROTTLE_COUNTER = 7, + ENET_PEER_PACKET_THROTTLE_ACCELERATION = 2, + ENET_PEER_PACKET_THROTTLE_DECELERATION = 2, + ENET_PEER_PACKET_THROTTLE_INTERVAL = 5000, + ENET_PEER_PACKET_LOSS_SCALE = (1 << 16), + ENET_PEER_PACKET_LOSS_INTERVAL = 10000, + ENET_PEER_WINDOW_SIZE_SCALE = 64 * 1024, + ENET_PEER_TIMEOUT_LIMIT = 32, + ENET_PEER_TIMEOUT_MINIMUM = 5000, + ENET_PEER_TIMEOUT_MAXIMUM = 30000, + ENET_PEER_PING_INTERVAL = 500, + ENET_PEER_UNSEQUENCED_WINDOWS = 64, + 
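    /* Editorial aside, not upstream ENet code: the packet flags documented above are
       combined when a packet is created.  A minimal, hedged sketch of the common
       reliable case, assuming an already-connected ENetPeer * peer:

           ENetPacket * packet = enet_packet_create ("hello", 6, ENET_PACKET_FLAG_RELIABLE);
           if (packet != NULL && enet_peer_send (peer, 0, packet) < 0)
               enet_packet_destroy (packet);

       enet_peer_send takes ownership of the packet when it succeeds; the caller only
       destroys it here because queueing failed. */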
ENET_PEER_UNSEQUENCED_WINDOW_SIZE = 1024, + ENET_PEER_FREE_UNSEQUENCED_WINDOWS = 32, + ENET_PEER_RELIABLE_WINDOWS = 16, + ENET_PEER_RELIABLE_WINDOW_SIZE = 0x1000, + ENET_PEER_FREE_RELIABLE_WINDOWS = 8 +}; + +typedef struct _ENetChannel +{ + enet_uint16 outgoingReliableSequenceNumber; + enet_uint16 outgoingUnreliableSequenceNumber; + enet_uint16 usedReliableWindows; + enet_uint16 reliableWindows [ENET_PEER_RELIABLE_WINDOWS]; + enet_uint16 incomingReliableSequenceNumber; + enet_uint16 incomingUnreliableSequenceNumber; + ENetList incomingReliableCommands; + ENetList incomingUnreliableCommands; +} ENetChannel; + +typedef enum _ENetPeerFlag +{ + ENET_PEER_FLAG_NEEDS_DISPATCH = (1 << 0), + ENET_PEER_FLAG_CONTINUE_SENDING = (1 << 1) +} ENetPeerFlag; + +/** + * An ENet peer which data packets may be sent or received from. + * + * No fields should be modified unless otherwise specified. + */ +typedef struct _ENetPeer +{ + ENetListNode dispatchList; + struct _ENetHost * host; + enet_uint16 outgoingPeerID; + enet_uint16 incomingPeerID; + enet_uint32 connectID; + enet_uint8 outgoingSessionID; + enet_uint8 incomingSessionID; + ENetAddress address; /**< Internet address of the peer */ + void * data; /**< Application private data, may be freely modified */ + ENetPeerState state; + ENetChannel * channels; + size_t channelCount; /**< Number of channels allocated for communication with peer */ + enet_uint32 incomingBandwidth; /**< Downstream bandwidth of the client in bytes/second */ + enet_uint32 outgoingBandwidth; /**< Upstream bandwidth of the client in bytes/second */ + enet_uint32 incomingBandwidthThrottleEpoch; + enet_uint32 outgoingBandwidthThrottleEpoch; + enet_uint32 incomingDataTotal; + enet_uint32 outgoingDataTotal; + enet_uint32 lastSendTime; + enet_uint32 lastReceiveTime; + enet_uint32 nextTimeout; + enet_uint32 earliestTimeout; + enet_uint32 packetLossEpoch; + enet_uint32 packetsSent; + enet_uint32 packetsLost; + enet_uint32 packetLoss; /**< mean packet loss of reliable packets as a ratio with respect to the constant ENET_PEER_PACKET_LOSS_SCALE */ + enet_uint32 packetLossVariance; + enet_uint32 packetThrottle; + enet_uint32 packetThrottleLimit; + enet_uint32 packetThrottleCounter; + enet_uint32 packetThrottleEpoch; + enet_uint32 packetThrottleAcceleration; + enet_uint32 packetThrottleDeceleration; + enet_uint32 packetThrottleInterval; + enet_uint32 pingInterval; + enet_uint32 timeoutLimit; + enet_uint32 timeoutMinimum; + enet_uint32 timeoutMaximum; + enet_uint32 lastRoundTripTime; + enet_uint32 lowestRoundTripTime; + enet_uint32 lastRoundTripTimeVariance; + enet_uint32 highestRoundTripTimeVariance; + enet_uint32 roundTripTime; /**< mean round trip time (RTT), in milliseconds, between sending a reliable packet and receiving its acknowledgement */ + enet_uint32 roundTripTimeVariance; + enet_uint32 mtu; + enet_uint32 windowSize; + enet_uint32 reliableDataInTransit; + enet_uint16 outgoingReliableSequenceNumber; + ENetList acknowledgements; + ENetList sentReliableCommands; + ENetList outgoingSendReliableCommands; + ENetList outgoingCommands; + ENetList dispatchedCommands; + enet_uint16 flags; + enet_uint16 reserved; + enet_uint16 incomingUnsequencedGroup; + enet_uint16 outgoingUnsequencedGroup; + enet_uint32 unsequencedWindow [ENET_PEER_UNSEQUENCED_WINDOW_SIZE / 32]; + enet_uint32 eventData; + size_t totalWaitingData; +} ENetPeer; + +/** An ENet packet compressor for compressing UDP packets before socket sends or receives. 
+ */ +typedef struct _ENetCompressor +{ + /** Context data for the compressor. Must be non-NULL. */ + void * context; + /** Compresses from inBuffers[0:inBufferCount-1], containing inLimit bytes, to outData, outputting at most outLimit bytes. Should return 0 on failure. */ + size_t (ENET_CALLBACK * compress) (void * context, const ENetBuffer * inBuffers, size_t inBufferCount, size_t inLimit, enet_uint8 * outData, size_t outLimit); + /** Decompresses from inData, containing inLimit bytes, to outData, outputting at most outLimit bytes. Should return 0 on failure. */ + size_t (ENET_CALLBACK * decompress) (void * context, const enet_uint8 * inData, size_t inLimit, enet_uint8 * outData, size_t outLimit); + /** Destroys the context when compression is disabled or the host is destroyed. May be NULL. */ + void (ENET_CALLBACK * destroy) (void * context); +} ENetCompressor; + +/** Callback that computes the checksum of the data held in buffers[0:bufferCount-1] */ +typedef enet_uint32 (ENET_CALLBACK * ENetChecksumCallback) (const ENetBuffer * buffers, size_t bufferCount); + +/** Callback for intercepting received raw UDP packets. Should return 1 to intercept, 0 to ignore, or -1 to propagate an error. */ +typedef int (ENET_CALLBACK * ENetInterceptCallback) (struct _ENetHost * host, struct _ENetEvent * event); + +/** An ENet host for communicating with peers. + * + * No fields should be modified unless otherwise stated. + + @sa enet_host_create() + @sa enet_host_destroy() + @sa enet_host_connect() + @sa enet_host_service() + @sa enet_host_flush() + @sa enet_host_broadcast() + @sa enet_host_compress() + @sa enet_host_compress_with_range_coder() + @sa enet_host_channel_limit() + @sa enet_host_bandwidth_limit() + @sa enet_host_bandwidth_throttle() + */ +typedef struct _ENetHost +{ + ENetSocket socket; + ENetAddress address; /**< Internet address of the host */ + enet_uint32 incomingBandwidth; /**< downstream bandwidth of the host */ + enet_uint32 outgoingBandwidth; /**< upstream bandwidth of the host */ + enet_uint32 bandwidthThrottleEpoch; + enet_uint32 mtu; + enet_uint32 randomSeed; + int recalculateBandwidthLimits; + ENetPeer * peers; /**< array of peers allocated for this host */ + size_t peerCount; /**< number of peers allocated for this host */ + size_t channelLimit; /**< maximum number of channels allowed for connected peers */ + enet_uint32 serviceTime; + ENetList dispatchQueue; + enet_uint32 totalQueued; + size_t packetSize; + enet_uint16 headerFlags; + ENetProtocol commands [ENET_PROTOCOL_MAXIMUM_PACKET_COMMANDS]; + size_t commandCount; + ENetBuffer buffers [ENET_BUFFER_MAXIMUM]; + size_t bufferCount; + ENetChecksumCallback checksum; /**< callback the user can set to enable packet checksums for this host */ + ENetCompressor compressor; + enet_uint8 packetData [2][ENET_PROTOCOL_MAXIMUM_MTU]; + ENetAddress receivedAddress; + enet_uint8 * receivedData; + size_t receivedDataLength; + enet_uint32 totalSentData; /**< total data sent, user should reset to 0 as needed to prevent overflow */ + enet_uint32 totalSentPackets; /**< total UDP packets sent, user should reset to 0 as needed to prevent overflow */ + enet_uint32 totalReceivedData; /**< total data received, user should reset to 0 as needed to prevent overflow */ + enet_uint32 totalReceivedPackets; /**< total UDP packets received, user should reset to 0 as needed to prevent overflow */ + ENetInterceptCallback intercept; /**< callback the user can set to intercept received raw UDP packets */ + size_t connectedPeers; + size_t bandwidthLimitedPeers; + 
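    /* Editorial aside, not upstream ENet code: a hedged sketch of the host-level knobs
       surrounding these fields, assuming an existing ENetHost * host.  The bandwidth
       values feed the enet_host_bandwidth_throttle () logic shown earlier in host.c,
       and the 64 KB/s figures are arbitrary illustrative numbers:

           enet_host_bandwidth_limit (host, 64 * 1024, 64 * 1024);
           host -> checksum = enet_crc32;
           (void) enet_host_compress_with_range_coder (host);

       The second line enables per-packet CRC32 checksums via the public checksum
       callback; the third swaps in ENet's built-in range coder for compression. */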
size_t duplicatePeers; /**< optional number of allowed peers from duplicate IPs, defaults to ENET_PROTOCOL_MAXIMUM_PEER_ID */ + size_t maximumPacketSize; /**< the maximum allowable packet size that may be sent or received on a peer */ + size_t maximumWaitingData; /**< the maximum aggregate amount of buffer space a peer may use waiting for packets to be delivered */ +} ENetHost; + +/** + * An ENet event type, as specified in @ref ENetEvent. + */ +typedef enum _ENetEventType +{ + /** no event occurred within the specified time limit */ + ENET_EVENT_TYPE_NONE = 0, + + /** a connection request initiated by enet_host_connect has completed. + * The peer field contains the peer which successfully connected. + */ + ENET_EVENT_TYPE_CONNECT = 1, + + /** a peer has disconnected. This event is generated on a successful + * completion of a disconnect initiated by enet_peer_disconnect, if + * a peer has timed out, or if a connection request intialized by + * enet_host_connect has timed out. The peer field contains the peer + * which disconnected. The data field contains user supplied data + * describing the disconnection, or 0, if none is available. + */ + ENET_EVENT_TYPE_DISCONNECT = 2, + + /** a packet has been received from a peer. The peer field specifies the + * peer which sent the packet. The channelID field specifies the channel + * number upon which the packet was received. The packet field contains + * the packet that was received; this packet must be destroyed with + * enet_packet_destroy after use. + */ + ENET_EVENT_TYPE_RECEIVE = 3 +} ENetEventType; + +/** + * An ENet event as returned by enet_host_service(). + + @sa enet_host_service + */ +typedef struct _ENetEvent +{ + ENetEventType type; /**< type of the event */ + ENetPeer * peer; /**< peer that generated a connect, disconnect or receive event */ + enet_uint8 channelID; /**< channel on the peer that generated the event, if appropriate */ + enet_uint32 data; /**< data associated with the event, if appropriate */ + ENetPacket * packet; /**< packet associated with the event, if appropriate */ +} ENetEvent; + +#ifdef __cplusplus +extern "C" +{ +#endif + +/** @defgroup global ENet global functions + @{ +*/ + +/** + Initializes ENet globally. Must be called prior to using any functions in + ENet. + @returns 0 on success, < 0 on failure +*/ +ENET_API int enet_initialize (void); + +/** + Initializes ENet globally and supplies user-overridden callbacks. Must be called prior to using any functions in ENet. Do not use enet_initialize() if you use this variant. Make sure the ENetCallbacks structure is zeroed out so that any additional callbacks added in future versions will be properly ignored. + + @param version the constant ENET_VERSION should be supplied so ENet knows which version of ENetCallbacks struct to use + @param inits user-overridden callbacks where any NULL callbacks will use ENet's defaults + @returns 0 on success, < 0 on failure +*/ +ENET_API int enet_initialize_with_callbacks (ENetVersion version, const ENetCallbacks * inits); + +/** + Shuts down ENet globally. Should be called when a program that has + initialized ENet exits. +*/ +ENET_API void enet_deinitialize (void); + +/** + Gives the linked version of the ENet library. + @returns the version number +*/ +ENET_API ENetVersion enet_linked_version (void); + +/** @} */ + +/** @defgroup private ENet private implementation functions */ + +/** + Returns the wall-time in milliseconds. Its initial value is unspecified + unless otherwise set. 
+ */ +ENET_API enet_uint32 enet_time_get (void); +/** + Sets the current wall-time in milliseconds. + */ +ENET_API void enet_time_set (enet_uint32); + +/** @defgroup socket ENet socket functions + @{ +*/ +ENET_API ENetSocket enet_socket_create (ENetSocketType); +ENET_API int enet_socket_bind (ENetSocket, const ENetAddress *); +ENET_API int enet_socket_get_address (ENetSocket, ENetAddress *); +ENET_API int enet_socket_listen (ENetSocket, int); +ENET_API ENetSocket enet_socket_accept (ENetSocket, ENetAddress *); +ENET_API int enet_socket_connect (ENetSocket, const ENetAddress *); +ENET_API int enet_socket_send (ENetSocket, const ENetAddress *, const ENetBuffer *, size_t); +ENET_API int enet_socket_receive (ENetSocket, ENetAddress *, ENetBuffer *, size_t); +ENET_API int enet_socket_wait (ENetSocket, enet_uint32 *, enet_uint32); +ENET_API int enet_socket_set_option (ENetSocket, ENetSocketOption, int); +ENET_API int enet_socket_get_option (ENetSocket, ENetSocketOption, int *); +ENET_API int enet_socket_shutdown (ENetSocket, ENetSocketShutdown); +ENET_API void enet_socket_destroy (ENetSocket); +ENET_API int enet_socketset_select (ENetSocket, ENetSocketSet *, ENetSocketSet *, enet_uint32); + +/** @} */ + +/** @defgroup Address ENet address functions + @{ +*/ + +/** Attempts to parse the printable form of the IP address in the parameter hostName + and sets the host field in the address parameter if successful. + @param address destination to store the parsed IP address + @param hostName IP address to parse + @retval 0 on success + @retval < 0 on failure + @returns the address of the given hostName in address on success +*/ +ENET_API int enet_address_set_host_ip (ENetAddress * address, const char * hostName); + +/** Attempts to resolve the host named by the parameter hostName and sets + the host field in the address parameter if successful. + @param address destination to store resolved address + @param hostName host name to lookup + @retval 0 on success + @retval < 0 on failure + @returns the address of the given hostName in address on success +*/ +ENET_API int enet_address_set_host (ENetAddress * address, const char * hostName); + +/** Gives the printable form of the IP address specified in the address parameter. + @param address address printed + @param hostName destination for name, must not be NULL + @param nameLength maximum length of hostName. + @returns the null-terminated name of the host in hostName on success + @retval 0 on success + @retval < 0 on failure +*/ +ENET_API int enet_address_get_host_ip (const ENetAddress * address, char * hostName, size_t nameLength); + +/** Attempts to do a reverse lookup of the host field in the address parameter. + @param address address used for reverse lookup + @param hostName destination for name, must not be NULL + @param nameLength maximum length of hostName. 
+ @returns the null-terminated name of the host in hostName on success + @retval 0 on success + @retval < 0 on failure +*/ +ENET_API int enet_address_get_host (const ENetAddress * address, char * hostName, size_t nameLength); + +/** @} */ + +ENET_API ENetPacket * enet_packet_create (const void *, size_t, enet_uint32); +ENET_API void enet_packet_destroy (ENetPacket *); +ENET_API int enet_packet_resize (ENetPacket *, size_t); +ENET_API enet_uint32 enet_crc32 (const ENetBuffer *, size_t); + +ENET_API ENetHost * enet_host_create (const ENetAddress *, size_t, size_t, enet_uint32, enet_uint32); +ENET_API void enet_host_destroy (ENetHost *); +ENET_API ENetPeer * enet_host_connect (ENetHost *, const ENetAddress *, size_t, enet_uint32); +ENET_API int enet_host_check_events (ENetHost *, ENetEvent *); +ENET_API int enet_host_service (ENetHost *, ENetEvent *, enet_uint32); +ENET_API void enet_host_flush (ENetHost *); +ENET_API void enet_host_broadcast (ENetHost *, enet_uint8, ENetPacket *); +ENET_API void enet_host_compress (ENetHost *, const ENetCompressor *); +ENET_API int enet_host_compress_with_range_coder (ENetHost * host); +ENET_API void enet_host_channel_limit (ENetHost *, size_t); +ENET_API void enet_host_bandwidth_limit (ENetHost *, enet_uint32, enet_uint32); +extern void enet_host_bandwidth_throttle (ENetHost *); +extern enet_uint32 enet_host_random_seed (void); +extern enet_uint32 enet_host_random (ENetHost *); + +ENET_API int enet_peer_send (ENetPeer *, enet_uint8, ENetPacket *); +ENET_API ENetPacket * enet_peer_receive (ENetPeer *, enet_uint8 * channelID); +ENET_API void enet_peer_ping (ENetPeer *); +ENET_API void enet_peer_ping_interval (ENetPeer *, enet_uint32); +ENET_API void enet_peer_timeout (ENetPeer *, enet_uint32, enet_uint32, enet_uint32); +ENET_API void enet_peer_reset (ENetPeer *); +ENET_API void enet_peer_disconnect (ENetPeer *, enet_uint32); +ENET_API void enet_peer_disconnect_now (ENetPeer *, enet_uint32); +ENET_API void enet_peer_disconnect_later (ENetPeer *, enet_uint32); +ENET_API void enet_peer_throttle_configure (ENetPeer *, enet_uint32, enet_uint32, enet_uint32); +extern int enet_peer_throttle (ENetPeer *, enet_uint32); +extern void enet_peer_reset_queues (ENetPeer *); +extern int enet_peer_has_outgoing_commands (ENetPeer *); +extern void enet_peer_setup_outgoing_command (ENetPeer *, ENetOutgoingCommand *); +extern ENetOutgoingCommand * enet_peer_queue_outgoing_command (ENetPeer *, const ENetProtocol *, ENetPacket *, enet_uint32, enet_uint16); +extern ENetIncomingCommand * enet_peer_queue_incoming_command (ENetPeer *, const ENetProtocol *, const void *, size_t, enet_uint32, enet_uint32); +extern ENetAcknowledgement * enet_peer_queue_acknowledgement (ENetPeer *, const ENetProtocol *, enet_uint16); +extern void enet_peer_dispatch_incoming_unreliable_commands (ENetPeer *, ENetChannel *, ENetIncomingCommand *); +extern void enet_peer_dispatch_incoming_reliable_commands (ENetPeer *, ENetChannel *, ENetIncomingCommand *); +extern void enet_peer_on_connect (ENetPeer *); +extern void enet_peer_on_disconnect (ENetPeer *); + +ENET_API void * enet_range_coder_create (void); +ENET_API void enet_range_coder_destroy (void *); +ENET_API size_t enet_range_coder_compress (void *, const ENetBuffer *, size_t, size_t, enet_uint8 *, size_t); +ENET_API size_t enet_range_coder_decompress (void *, const enet_uint8 *, size_t, enet_uint8 *, size_t); + +extern size_t enet_protocol_command_size (enet_uint8); + +#ifdef __cplusplus +} +#endif + +#endif /* __ENET_ENET_H__ */ + diff --git 
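The enet.h header above is the library's entire public surface. For orientation only (this sketch is not part of the vendored diff, and the host name, port number, and timeout are made-up illustrative values), a minimal client built on those declarations looks roughly like this:

    #include <enet/enet.h>
    #include <stdio.h>

    int main (void)
    {
        ENetAddress address;
        ENetHost * client;
        ENetPeer * peer;
        ENetEvent event;

        if (enet_initialize () != 0)
            return 1;

        /* client host: 1 outgoing connection, 2 channels, no bandwidth limits */
        client = enet_host_create (NULL, 1, 2, 0, 0);
        if (client == NULL)
            return 1;

        enet_address_set_host (& address, "localhost");
        address.port = 28785;   /* illustrative port only */

        /* connect on 2 channels, with 0 as the user data delivered with the connect */
        peer = enet_host_connect (client, & address, 2, 0);
        if (peer == NULL)
            return 1;

        /* pump events for up to 5 seconds at a time */
        while (enet_host_service (client, & event, 5000) > 0)
            switch (event.type)
            {
            case ENET_EVENT_TYPE_CONNECT:
                printf ("connected\n");
                break;
            case ENET_EVENT_TYPE_RECEIVE:
                printf ("got %u bytes on channel %u\n",
                        (unsigned) event.packet -> dataLength,
                        (unsigned) event.channelID);
                enet_packet_destroy (event.packet);   /* receiver owns the packet */
                break;
            case ENET_EVENT_TYPE_DISCONNECT:
                printf ("disconnected\n");
                break;
            default:
                break;
            }

        enet_host_destroy (client);
        enet_deinitialize ();
        return 0;
    }

Error paths are collapsed for brevity; a real client would also call enet_peer_disconnect and drain the remaining events before destroying the host.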
a/pkg/enet/enet/include/enet/list.h b/pkg/enet/enet/include/enet/list.h new file mode 100644 index 000000000..076e8860f --- /dev/null +++ b/pkg/enet/enet/include/enet/list.h @@ -0,0 +1,52 @@ +/** + @file list.h + @brief ENet list management +*/ +#ifndef __ENET_LIST_H__ +#define __ENET_LIST_H__ + +#include + +typedef struct _ENetListNode +{ + struct _ENetListNode * next; + struct _ENetListNode * previous; +} ENetListNode; + +typedef ENetListNode * ENetListIterator; + +typedef struct _ENetList +{ + ENetListNode sentinel; +} ENetList; + +#ifdef __cplusplus +extern "C" +{ +#endif + +extern void enet_list_clear (ENetList *); + +extern ENetListIterator enet_list_insert (ENetListIterator, void *); +extern void * enet_list_remove (ENetListIterator); +extern ENetListIterator enet_list_move (ENetListIterator, void *, void *); + +extern size_t enet_list_size (ENetList *); + +#ifdef __cplusplus +} +#endif + +#define enet_list_begin(list) ((list) -> sentinel.next) +#define enet_list_end(list) (& (list) -> sentinel) + +#define enet_list_empty(list) (enet_list_begin (list) == enet_list_end (list)) + +#define enet_list_next(iterator) ((iterator) -> next) +#define enet_list_previous(iterator) ((iterator) -> previous) + +#define enet_list_front(list) ((void *) (list) -> sentinel.next) +#define enet_list_back(list) ((void *) (list) -> sentinel.previous) + +#endif /* __ENET_LIST_H__ */ + diff --git a/pkg/enet/enet/include/enet/protocol.h b/pkg/enet/enet/include/enet/protocol.h new file mode 100644 index 000000000..f8c73d8a6 --- /dev/null +++ b/pkg/enet/enet/include/enet/protocol.h @@ -0,0 +1,198 @@ +/** + @file protocol.h + @brief ENet protocol +*/ +#ifndef __ENET_PROTOCOL_H__ +#define __ENET_PROTOCOL_H__ + +#include "enet/types.h" + +enum +{ + ENET_PROTOCOL_MINIMUM_MTU = 576, + ENET_PROTOCOL_MAXIMUM_MTU = 4096, + ENET_PROTOCOL_MAXIMUM_PACKET_COMMANDS = 32, + ENET_PROTOCOL_MINIMUM_WINDOW_SIZE = 4096, + ENET_PROTOCOL_MAXIMUM_WINDOW_SIZE = 65536, + ENET_PROTOCOL_MINIMUM_CHANNEL_COUNT = 1, + ENET_PROTOCOL_MAXIMUM_CHANNEL_COUNT = 255, + ENET_PROTOCOL_MAXIMUM_PEER_ID = 0xFFF, + ENET_PROTOCOL_MAXIMUM_FRAGMENT_COUNT = 1024 * 1024 +}; + +typedef enum _ENetProtocolCommand +{ + ENET_PROTOCOL_COMMAND_NONE = 0, + ENET_PROTOCOL_COMMAND_ACKNOWLEDGE = 1, + ENET_PROTOCOL_COMMAND_CONNECT = 2, + ENET_PROTOCOL_COMMAND_VERIFY_CONNECT = 3, + ENET_PROTOCOL_COMMAND_DISCONNECT = 4, + ENET_PROTOCOL_COMMAND_PING = 5, + ENET_PROTOCOL_COMMAND_SEND_RELIABLE = 6, + ENET_PROTOCOL_COMMAND_SEND_UNRELIABLE = 7, + ENET_PROTOCOL_COMMAND_SEND_FRAGMENT = 8, + ENET_PROTOCOL_COMMAND_SEND_UNSEQUENCED = 9, + ENET_PROTOCOL_COMMAND_BANDWIDTH_LIMIT = 10, + ENET_PROTOCOL_COMMAND_THROTTLE_CONFIGURE = 11, + ENET_PROTOCOL_COMMAND_SEND_UNRELIABLE_FRAGMENT = 12, + ENET_PROTOCOL_COMMAND_COUNT = 13, + + ENET_PROTOCOL_COMMAND_MASK = 0x0F +} ENetProtocolCommand; + +typedef enum _ENetProtocolFlag +{ + ENET_PROTOCOL_COMMAND_FLAG_ACKNOWLEDGE = (1 << 7), + ENET_PROTOCOL_COMMAND_FLAG_UNSEQUENCED = (1 << 6), + + ENET_PROTOCOL_HEADER_FLAG_COMPRESSED = (1 << 14), + ENET_PROTOCOL_HEADER_FLAG_SENT_TIME = (1 << 15), + ENET_PROTOCOL_HEADER_FLAG_MASK = ENET_PROTOCOL_HEADER_FLAG_COMPRESSED | ENET_PROTOCOL_HEADER_FLAG_SENT_TIME, + + ENET_PROTOCOL_HEADER_SESSION_MASK = (3 << 12), + ENET_PROTOCOL_HEADER_SESSION_SHIFT = 12 +} ENetProtocolFlag; + +#ifdef _MSC_VER +#pragma pack(push, 1) +#define ENET_PACKED +#elif defined(__GNUC__) || defined(__clang__) +#define ENET_PACKED __attribute__ ((packed)) +#else +#define ENET_PACKED +#endif + +typedef struct _ENetProtocolHeader +{ + 
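    /* Editorial aside, not upstream ENet code: the list.h macros added above implement
       an intrusive doubly linked list with a sentinel node.  A hedged sketch of the
       usual iteration pattern, here over a peer's outgoingCommands list:

           ENetListIterator it;
           for (it = enet_list_begin (& peer -> outgoingCommands);
                it != enet_list_end (& peer -> outgoingCommands);
                it = enet_list_next (it))
           {
               ENetOutgoingCommand * outgoing = (ENetOutgoingCommand *) it;
               ... inspect outgoing -> command or outgoing -> packet ...
           }

       The cast is valid because outgoingCommandList is the first member of
       ENetOutgoingCommand, which is exactly how ENet itself walks these lists. */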
enet_uint16 peerID; + enet_uint16 sentTime; +} ENET_PACKED ENetProtocolHeader; + +typedef struct _ENetProtocolCommandHeader +{ + enet_uint8 command; + enet_uint8 channelID; + enet_uint16 reliableSequenceNumber; +} ENET_PACKED ENetProtocolCommandHeader; + +typedef struct _ENetProtocolAcknowledge +{ + ENetProtocolCommandHeader header; + enet_uint16 receivedReliableSequenceNumber; + enet_uint16 receivedSentTime; +} ENET_PACKED ENetProtocolAcknowledge; + +typedef struct _ENetProtocolConnect +{ + ENetProtocolCommandHeader header; + enet_uint16 outgoingPeerID; + enet_uint8 incomingSessionID; + enet_uint8 outgoingSessionID; + enet_uint32 mtu; + enet_uint32 windowSize; + enet_uint32 channelCount; + enet_uint32 incomingBandwidth; + enet_uint32 outgoingBandwidth; + enet_uint32 packetThrottleInterval; + enet_uint32 packetThrottleAcceleration; + enet_uint32 packetThrottleDeceleration; + enet_uint32 connectID; + enet_uint32 data; +} ENET_PACKED ENetProtocolConnect; + +typedef struct _ENetProtocolVerifyConnect +{ + ENetProtocolCommandHeader header; + enet_uint16 outgoingPeerID; + enet_uint8 incomingSessionID; + enet_uint8 outgoingSessionID; + enet_uint32 mtu; + enet_uint32 windowSize; + enet_uint32 channelCount; + enet_uint32 incomingBandwidth; + enet_uint32 outgoingBandwidth; + enet_uint32 packetThrottleInterval; + enet_uint32 packetThrottleAcceleration; + enet_uint32 packetThrottleDeceleration; + enet_uint32 connectID; +} ENET_PACKED ENetProtocolVerifyConnect; + +typedef struct _ENetProtocolBandwidthLimit +{ + ENetProtocolCommandHeader header; + enet_uint32 incomingBandwidth; + enet_uint32 outgoingBandwidth; +} ENET_PACKED ENetProtocolBandwidthLimit; + +typedef struct _ENetProtocolThrottleConfigure +{ + ENetProtocolCommandHeader header; + enet_uint32 packetThrottleInterval; + enet_uint32 packetThrottleAcceleration; + enet_uint32 packetThrottleDeceleration; +} ENET_PACKED ENetProtocolThrottleConfigure; + +typedef struct _ENetProtocolDisconnect +{ + ENetProtocolCommandHeader header; + enet_uint32 data; +} ENET_PACKED ENetProtocolDisconnect; + +typedef struct _ENetProtocolPing +{ + ENetProtocolCommandHeader header; +} ENET_PACKED ENetProtocolPing; + +typedef struct _ENetProtocolSendReliable +{ + ENetProtocolCommandHeader header; + enet_uint16 dataLength; +} ENET_PACKED ENetProtocolSendReliable; + +typedef struct _ENetProtocolSendUnreliable +{ + ENetProtocolCommandHeader header; + enet_uint16 unreliableSequenceNumber; + enet_uint16 dataLength; +} ENET_PACKED ENetProtocolSendUnreliable; + +typedef struct _ENetProtocolSendUnsequenced +{ + ENetProtocolCommandHeader header; + enet_uint16 unsequencedGroup; + enet_uint16 dataLength; +} ENET_PACKED ENetProtocolSendUnsequenced; + +typedef struct _ENetProtocolSendFragment +{ + ENetProtocolCommandHeader header; + enet_uint16 startSequenceNumber; + enet_uint16 dataLength; + enet_uint32 fragmentCount; + enet_uint32 fragmentNumber; + enet_uint32 totalLength; + enet_uint32 fragmentOffset; +} ENET_PACKED ENetProtocolSendFragment; + +typedef union _ENetProtocol +{ + ENetProtocolCommandHeader header; + ENetProtocolAcknowledge acknowledge; + ENetProtocolConnect connect; + ENetProtocolVerifyConnect verifyConnect; + ENetProtocolDisconnect disconnect; + ENetProtocolPing ping; + ENetProtocolSendReliable sendReliable; + ENetProtocolSendUnreliable sendUnreliable; + ENetProtocolSendUnsequenced sendUnsequenced; + ENetProtocolSendFragment sendFragment; + ENetProtocolBandwidthLimit bandwidthLimit; + ENetProtocolThrottleConfigure throttleConfigure; +} ENET_PACKED ENetProtocol; + 
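/* Editorial aside, not upstream ENet code: every on-wire command begins with the packed
   ENetProtocolCommandHeader defined above, and its command byte carries both a command
   identifier and per-command flags.  A hedged sketch of how that byte is split; the
   helper name is hypothetical and exists purely for illustration. */
static int
enet_example_decode_command_byte (enet_uint8 commandByte,
                                  ENetProtocolCommand * command)
{
    /* the low four bits (ENET_PROTOCOL_COMMAND_MASK) select one of the commands above */
    * command = (ENetProtocolCommand) (commandByte & ENET_PROTOCOL_COMMAND_MASK);

    /* nonzero when the sender requested an acknowledgement for this command */
    return (commandByte & ENET_PROTOCOL_COMMAND_FLAG_ACKNOWLEDGE) != 0;
}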
+#ifdef _MSC_VER +#pragma pack(pop) +#endif + +#endif /* __ENET_PROTOCOL_H__ */ + diff --git a/pkg/enet/enet/include/enet/time.h b/pkg/enet/enet/include/enet/time.h new file mode 100644 index 000000000..c82a54603 --- /dev/null +++ b/pkg/enet/enet/include/enet/time.h @@ -0,0 +1,18 @@ +/** + @file time.h + @brief ENet time constants and macros +*/ +#ifndef __ENET_TIME_H__ +#define __ENET_TIME_H__ + +#define ENET_TIME_OVERFLOW 86400000 + +#define ENET_TIME_LESS(a, b) ((a) - (b) >= ENET_TIME_OVERFLOW) +#define ENET_TIME_GREATER(a, b) ((b) - (a) >= ENET_TIME_OVERFLOW) +#define ENET_TIME_LESS_EQUAL(a, b) (! ENET_TIME_GREATER (a, b)) +#define ENET_TIME_GREATER_EQUAL(a, b) (! ENET_TIME_LESS (a, b)) + +#define ENET_TIME_DIFFERENCE(a, b) ((a) - (b) >= ENET_TIME_OVERFLOW ? (b) - (a) : (a) - (b)) + +#endif /* __ENET_TIME_H__ */ + diff --git a/pkg/enet/enet/include/enet/types.h b/pkg/enet/enet/include/enet/types.h new file mode 100644 index 000000000..ab010a4b1 --- /dev/null +++ b/pkg/enet/enet/include/enet/types.h @@ -0,0 +1,13 @@ +/** + @file types.h + @brief type definitions for ENet +*/ +#ifndef __ENET_TYPES_H__ +#define __ENET_TYPES_H__ + +typedef unsigned char enet_uint8; /**< unsigned 8-bit type */ +typedef unsigned short enet_uint16; /**< unsigned 16-bit type */ +typedef unsigned int enet_uint32; /**< unsigned 32-bit type */ + +#endif /* __ENET_TYPES_H__ */ + diff --git a/pkg/enet/enet/include/enet/unix.h b/pkg/enet/enet/include/enet/unix.h new file mode 100644 index 000000000..b55be3310 --- /dev/null +++ b/pkg/enet/enet/include/enet/unix.h @@ -0,0 +1,48 @@ +/** + @file unix.h + @brief ENet Unix header +*/ +#ifndef __ENET_UNIX_H__ +#define __ENET_UNIX_H__ + +#include +#include +#include +#include +#include +#include +#include + +#ifdef MSG_MAXIOVLEN +#define ENET_BUFFER_MAXIMUM MSG_MAXIOVLEN +#endif + +typedef int ENetSocket; + +#define ENET_SOCKET_NULL -1 + +#define ENET_HOST_TO_NET_16(value) (htons (value)) /**< macro that converts host to net byte-order of a 16-bit value */ +#define ENET_HOST_TO_NET_32(value) (htonl (value)) /**< macro that converts host to net byte-order of a 32-bit value */ + +#define ENET_NET_TO_HOST_16(value) (ntohs (value)) /**< macro that converts net to host byte-order of a 16-bit value */ +#define ENET_NET_TO_HOST_32(value) (ntohl (value)) /**< macro that converts net to host byte-order of a 32-bit value */ + +typedef struct +{ + void * data; + size_t dataLength; +} ENetBuffer; + +#define ENET_CALLBACK + +#define ENET_API extern + +typedef fd_set ENetSocketSet; + +#define ENET_SOCKETSET_EMPTY(sockset) FD_ZERO (& (sockset)) +#define ENET_SOCKETSET_ADD(sockset, socket) FD_SET (socket, & (sockset)) +#define ENET_SOCKETSET_REMOVE(sockset, socket) FD_CLR (socket, & (sockset)) +#define ENET_SOCKETSET_CHECK(sockset, socket) FD_ISSET (socket, & (sockset)) + +#endif /* __ENET_UNIX_H__ */ + diff --git a/pkg/enet/enet/include/enet/utility.h b/pkg/enet/enet/include/enet/utility.h new file mode 100644 index 000000000..b04bb7a5b --- /dev/null +++ b/pkg/enet/enet/include/enet/utility.h @@ -0,0 +1,13 @@ +/** + @file utility.h + @brief ENet utility header +*/ +#ifndef __ENET_UTILITY_H__ +#define __ENET_UTILITY_H__ + +#define ENET_MAX(x, y) ((x) > (y) ? (x) : (y)) +#define ENET_MIN(x, y) ((x) < (y) ? (x) : (y)) +#define ENET_DIFFERENCE(x, y) ((x) < (y) ? 
(y) - (x) : (x) - (y)) + +#endif /* __ENET_UTILITY_H__ */ + diff --git a/pkg/enet/enet/include/enet/win32.h b/pkg/enet/enet/include/enet/win32.h new file mode 100644 index 000000000..1ba57909c --- /dev/null +++ b/pkg/enet/enet/include/enet/win32.h @@ -0,0 +1,63 @@ +/** + @file win32.h + @brief ENet Win32 header +*/ +#ifndef __ENET_WIN32_H__ +#define __ENET_WIN32_H__ + +#ifdef _MSC_VER +#ifdef ENET_BUILDING_LIB +#pragma warning (disable: 4267) // size_t to int conversion +#pragma warning (disable: 4244) // 64bit to 32bit int +#pragma warning (disable: 4018) // signed/unsigned mismatch +#pragma warning (disable: 4146) // unary minus operator applied to unsigned type +#ifndef _CRT_SECURE_NO_DEPRECATE +#define _CRT_SECURE_NO_DEPRECATE +#endif +#ifndef _CRT_SECURE_NO_WARNINGS +#define _CRT_SECURE_NO_WARNINGS +#endif +#endif +#endif + +#include +#include + +typedef SOCKET ENetSocket; + +#define ENET_SOCKET_NULL INVALID_SOCKET + +#define ENET_HOST_TO_NET_16(value) (htons (value)) +#define ENET_HOST_TO_NET_32(value) (htonl (value)) + +#define ENET_NET_TO_HOST_16(value) (ntohs (value)) +#define ENET_NET_TO_HOST_32(value) (ntohl (value)) + +typedef struct +{ + size_t dataLength; + void * data; +} ENetBuffer; + +#define ENET_CALLBACK __cdecl + +#ifdef ENET_DLL +#ifdef ENET_BUILDING_LIB +#define ENET_API __declspec( dllexport ) +#else +#define ENET_API __declspec( dllimport ) +#endif /* ENET_BUILDING_LIB */ +#else /* !ENET_DLL */ +#define ENET_API extern +#endif /* ENET_DLL */ + +typedef fd_set ENetSocketSet; + +#define ENET_SOCKETSET_EMPTY(sockset) FD_ZERO (& (sockset)) +#define ENET_SOCKETSET_ADD(sockset, socket) FD_SET (socket, & (sockset)) +#define ENET_SOCKETSET_REMOVE(sockset, socket) FD_CLR (socket, & (sockset)) +#define ENET_SOCKETSET_CHECK(sockset, socket) FD_ISSET (socket, & (sockset)) + +#endif /* __ENET_WIN32_H__ */ + + diff --git a/pkg/enet/enet/install-sh b/pkg/enet/enet/install-sh new file mode 100755 index 000000000..ec298b537 --- /dev/null +++ b/pkg/enet/enet/install-sh @@ -0,0 +1,541 @@ +#!/bin/sh +# install - install a program, script, or datafile + +scriptversion=2020-11-14.01; # UTC + +# This originates from X11R5 (mit/util/scripts/install.sh), which was +# later released in X11R6 (xc/config/util/install.sh) with the +# following copyright and license. +# +# Copyright (C) 1994 X Consortium +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN +# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC- +# TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+# +# Except as contained in this notice, the name of the X Consortium shall not +# be used in advertising or otherwise to promote the sale, use or other deal- +# ings in this Software without prior written authorization from the X Consor- +# tium. +# +# +# FSF changes to this file are in the public domain. +# +# Calling this script install-sh is preferred over install.sh, to prevent +# 'make' implicit rules from creating a file called install from it +# when there is no Makefile. +# +# This script is compatible with the BSD install script, but was written +# from scratch. + +tab=' ' +nl=' +' +IFS=" $tab$nl" + +# Set DOITPROG to "echo" to test this script. + +doit=${DOITPROG-} +doit_exec=${doit:-exec} + +# Put in absolute file names if you don't have them in your path; +# or use environment vars. + +chgrpprog=${CHGRPPROG-chgrp} +chmodprog=${CHMODPROG-chmod} +chownprog=${CHOWNPROG-chown} +cmpprog=${CMPPROG-cmp} +cpprog=${CPPROG-cp} +mkdirprog=${MKDIRPROG-mkdir} +mvprog=${MVPROG-mv} +rmprog=${RMPROG-rm} +stripprog=${STRIPPROG-strip} + +posix_mkdir= + +# Desired mode of installed file. +mode=0755 + +# Create dirs (including intermediate dirs) using mode 755. +# This is like GNU 'install' as of coreutils 8.32 (2020). +mkdir_umask=22 + +backupsuffix= +chgrpcmd= +chmodcmd=$chmodprog +chowncmd= +mvcmd=$mvprog +rmcmd="$rmprog -f" +stripcmd= + +src= +dst= +dir_arg= +dst_arg= + +copy_on_change=false +is_target_a_directory=possibly + +usage="\ +Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE + or: $0 [OPTION]... SRCFILES... DIRECTORY + or: $0 [OPTION]... -t DIRECTORY SRCFILES... + or: $0 [OPTION]... -d DIRECTORIES... + +In the 1st form, copy SRCFILE to DSTFILE. +In the 2nd and 3rd, copy all SRCFILES to DIRECTORY. +In the 4th, create DIRECTORIES. + +Options: + --help display this help and exit. + --version display version info and exit. + + -c (ignored) + -C install only if different (preserve data modification time) + -d create directories instead of installing files. + -g GROUP $chgrpprog installed files to GROUP. + -m MODE $chmodprog installed files to MODE. + -o USER $chownprog installed files to USER. + -p pass -p to $cpprog. + -s $stripprog installed files. + -S SUFFIX attempt to back up existing files, with suffix SUFFIX. + -t DIRECTORY install into DIRECTORY. + -T report an error if DSTFILE is a directory. + +Environment variables override the default commands: + CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG + RMPROG STRIPPROG + +By default, rm is invoked with -f; when overridden with RMPROG, +it's up to you to specify -f if you want it. + +If -S is not specified, no backups are attempted. + +Email bug reports to bug-automake@gnu.org. +Automake home page: https://www.gnu.org/software/automake/ +" + +while test $# -ne 0; do + case $1 in + -c) ;; + + -C) copy_on_change=true;; + + -d) dir_arg=true;; + + -g) chgrpcmd="$chgrpprog $2" + shift;; + + --help) echo "$usage"; exit $?;; + + -m) mode=$2 + case $mode in + *' '* | *"$tab"* | *"$nl"* | *'*'* | *'?'* | *'['*) + echo "$0: invalid mode: $mode" >&2 + exit 1;; + esac + shift;; + + -o) chowncmd="$chownprog $2" + shift;; + + -p) cpprog="$cpprog -p";; + + -s) stripcmd=$stripprog;; + + -S) backupsuffix="$2" + shift;; + + -t) + is_target_a_directory=always + dst_arg=$2 + # Protect names problematic for 'test' and other utilities. 
+ case $dst_arg in + -* | [=\(\)!]) dst_arg=./$dst_arg;; + esac + shift;; + + -T) is_target_a_directory=never;; + + --version) echo "$0 $scriptversion"; exit $?;; + + --) shift + break;; + + -*) echo "$0: invalid option: $1" >&2 + exit 1;; + + *) break;; + esac + shift +done + +# We allow the use of options -d and -T together, by making -d +# take the precedence; this is for compatibility with GNU install. + +if test -n "$dir_arg"; then + if test -n "$dst_arg"; then + echo "$0: target directory not allowed when installing a directory." >&2 + exit 1 + fi +fi + +if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then + # When -d is used, all remaining arguments are directories to create. + # When -t is used, the destination is already specified. + # Otherwise, the last argument is the destination. Remove it from $@. + for arg + do + if test -n "$dst_arg"; then + # $@ is not empty: it contains at least $arg. + set fnord "$@" "$dst_arg" + shift # fnord + fi + shift # arg + dst_arg=$arg + # Protect names problematic for 'test' and other utilities. + case $dst_arg in + -* | [=\(\)!]) dst_arg=./$dst_arg;; + esac + done +fi + +if test $# -eq 0; then + if test -z "$dir_arg"; then + echo "$0: no input file specified." >&2 + exit 1 + fi + # It's OK to call 'install-sh -d' without argument. + # This can happen when creating conditional directories. + exit 0 +fi + +if test -z "$dir_arg"; then + if test $# -gt 1 || test "$is_target_a_directory" = always; then + if test ! -d "$dst_arg"; then + echo "$0: $dst_arg: Is not a directory." >&2 + exit 1 + fi + fi +fi + +if test -z "$dir_arg"; then + do_exit='(exit $ret); exit $ret' + trap "ret=129; $do_exit" 1 + trap "ret=130; $do_exit" 2 + trap "ret=141; $do_exit" 13 + trap "ret=143; $do_exit" 15 + + # Set umask so as not to create temps with too-generous modes. + # However, 'strip' requires both read and write access to temps. + case $mode in + # Optimize common cases. + *644) cp_umask=133;; + *755) cp_umask=22;; + + *[0-7]) + if test -z "$stripcmd"; then + u_plus_rw= + else + u_plus_rw='% 200' + fi + cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;; + *) + if test -z "$stripcmd"; then + u_plus_rw= + else + u_plus_rw=,u+rw + fi + cp_umask=$mode$u_plus_rw;; + esac +fi + +for src +do + # Protect names problematic for 'test' and other utilities. + case $src in + -* | [=\(\)!]) src=./$src;; + esac + + if test -n "$dir_arg"; then + dst=$src + dstdir=$dst + test -d "$dstdir" + dstdir_status=$? + # Don't chown directories that already exist. + if test $dstdir_status = 0; then + chowncmd="" + fi + else + + # Waiting for this to be detected by the "$cpprog $src $dsttmp" command + # might cause directories to be created, which would be especially bad + # if $src (and thus $dsttmp) contains '*'. + if test ! -f "$src" && test ! -d "$src"; then + echo "$0: $src does not exist." >&2 + exit 1 + fi + + if test -z "$dst_arg"; then + echo "$0: no destination specified." >&2 + exit 1 + fi + dst=$dst_arg + + # If destination is a directory, append the input filename. + if test -d "$dst"; then + if test "$is_target_a_directory" = never; then + echo "$0: $dst_arg: Is a directory" >&2 + exit 1 + fi + dstdir=$dst + dstbase=`basename "$src"` + case $dst in + */) dst=$dst$dstbase;; + *) dst=$dst/$dstbase;; + esac + dstdir_status=0 + else + dstdir=`dirname "$dst"` + test -d "$dstdir" + dstdir_status=$? 
+ fi + fi + + case $dstdir in + */) dstdirslash=$dstdir;; + *) dstdirslash=$dstdir/;; + esac + + obsolete_mkdir_used=false + + if test $dstdir_status != 0; then + case $posix_mkdir in + '') + # With -d, create the new directory with the user-specified mode. + # Otherwise, rely on $mkdir_umask. + if test -n "$dir_arg"; then + mkdir_mode=-m$mode + else + mkdir_mode= + fi + + posix_mkdir=false + # The $RANDOM variable is not portable (e.g., dash). Use it + # here however when possible just to lower collision chance. + tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$ + + trap ' + ret=$? + rmdir "$tmpdir/a/b" "$tmpdir/a" "$tmpdir" 2>/dev/null + exit $ret + ' 0 + + # Because "mkdir -p" follows existing symlinks and we likely work + # directly in world-writeable /tmp, make sure that the '$tmpdir' + # directory is successfully created first before we actually test + # 'mkdir -p'. + if (umask $mkdir_umask && + $mkdirprog $mkdir_mode "$tmpdir" && + exec $mkdirprog $mkdir_mode -p -- "$tmpdir/a/b") >/dev/null 2>&1 + then + if test -z "$dir_arg" || { + # Check for POSIX incompatibilities with -m. + # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or + # other-writable bit of parent directory when it shouldn't. + # FreeBSD 6.1 mkdir -m -p sets mode of existing directory. + test_tmpdir="$tmpdir/a" + ls_ld_tmpdir=`ls -ld "$test_tmpdir"` + case $ls_ld_tmpdir in + d????-?r-*) different_mode=700;; + d????-?--*) different_mode=755;; + *) false;; + esac && + $mkdirprog -m$different_mode -p -- "$test_tmpdir" && { + ls_ld_tmpdir_1=`ls -ld "$test_tmpdir"` + test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1" + } + } + then posix_mkdir=: + fi + rmdir "$tmpdir/a/b" "$tmpdir/a" "$tmpdir" + else + # Remove any dirs left behind by ancient mkdir implementations. + rmdir ./$mkdir_mode ./-p ./-- "$tmpdir" 2>/dev/null + fi + trap '' 0;; + esac + + if + $posix_mkdir && ( + umask $mkdir_umask && + $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir" + ) + then : + else + + # mkdir does not conform to POSIX, + # or it failed possibly due to a race condition. Create the + # directory the slow way, step by step, checking for races as we go. + + case $dstdir in + /*) prefix='/';; + [-=\(\)!]*) prefix='./';; + *) prefix='';; + esac + + oIFS=$IFS + IFS=/ + set -f + set fnord $dstdir + shift + set +f + IFS=$oIFS + + prefixes= + + for d + do + test X"$d" = X && continue + + prefix=$prefix$d + if test -d "$prefix"; then + prefixes= + else + if $posix_mkdir; then + (umask $mkdir_umask && + $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break + # Don't fail if two instances are running concurrently. + test -d "$prefix" || exit 1 + else + case $prefix in + *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;; + *) qprefix=$prefix;; + esac + prefixes="$prefixes '$qprefix'" + fi + fi + prefix=$prefix/ + done + + if test -n "$prefixes"; then + # Don't fail if two instances are running concurrently. + (umask $mkdir_umask && + eval "\$doit_exec \$mkdirprog $prefixes") || + test -d "$dstdir" || exit 1 + obsolete_mkdir_used=true + fi + fi + fi + + if test -n "$dir_arg"; then + { test -z "$chowncmd" || $doit $chowncmd "$dst"; } && + { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } && + { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false || + test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1 + else + + # Make a couple of temp file names in the proper directory. + dsttmp=${dstdirslash}_inst.$$_ + rmtmp=${dstdirslash}_rm.$$_ + + # Trap to clean up those temp files at exit. 
+ trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0 + + # Copy the file name to the temp name. + (umask $cp_umask && + { test -z "$stripcmd" || { + # Create $dsttmp read-write so that cp doesn't create it read-only, + # which would cause strip to fail. + if test -z "$doit"; then + : >"$dsttmp" # No need to fork-exec 'touch'. + else + $doit touch "$dsttmp" + fi + } + } && + $doit_exec $cpprog "$src" "$dsttmp") && + + # and set any options; do chmod last to preserve setuid bits. + # + # If any of these fail, we abort the whole thing. If we want to + # ignore errors from any of these, just make sure not to ignore + # errors from the above "$doit $cpprog $src $dsttmp" command. + # + { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } && + { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } && + { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } && + { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } && + + # If -C, don't bother to copy if it wouldn't change the file. + if $copy_on_change && + old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` && + new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` && + set -f && + set X $old && old=:$2:$4:$5:$6 && + set X $new && new=:$2:$4:$5:$6 && + set +f && + test "$old" = "$new" && + $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1 + then + rm -f "$dsttmp" + else + # If $backupsuffix is set, and the file being installed + # already exists, attempt a backup. Don't worry if it fails, + # e.g., if mv doesn't support -f. + if test -n "$backupsuffix" && test -f "$dst"; then + $doit $mvcmd -f "$dst" "$dst$backupsuffix" 2>/dev/null + fi + + # Rename the file to the real destination. + $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null || + + # The rename failed, perhaps because mv can't rename something else + # to itself, or perhaps because mv is so ancient that it does not + # support -f. + { + # Now remove or move aside any old file at destination location. + # We try this two ways since rm can't unlink itself on some + # systems and the destination file might be busy for other + # reasons. In this case, the final cleanup might fail but the new + # file should still install successfully. + { + test ! -f "$dst" || + $doit $rmcmd "$dst" 2>/dev/null || + { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null && + { $doit $rmcmd "$rmtmp" 2>/dev/null; :; } + } || + { echo "$0: cannot unlink or rename $dst" >&2 + (exit 1); exit 1 + } + } && + + # Now rename the file to the real destination. 
+ $doit $mvcmd "$dsttmp" "$dst" + } + fi || exit 1 + + trap '' 0 + fi +done + +# Local variables: +# eval: (add-hook 'before-save-hook 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC0" +# time-stamp-end: "; # UTC" +# End: diff --git a/pkg/enet/enet/libenet.pc b/pkg/enet/enet/libenet.pc new file mode 100644 index 000000000..587a6593a --- /dev/null +++ b/pkg/enet/enet/libenet.pc @@ -0,0 +1,10 @@ +prefix=/usr/local +exec_prefix=${prefix} +libdir=${exec_prefix}/lib +includedir=${prefix}/include + +Name: libenet +Description: Low-latency UDP networking library supporting optional reliability +Version: 1.3.18 +Cflags: -I${includedir} +Libs: -L${libdir} -lenet diff --git a/pkg/enet/enet/libenet.pc.in b/pkg/enet/enet/libenet.pc.in new file mode 100644 index 000000000..7af85adbf --- /dev/null +++ b/pkg/enet/enet/libenet.pc.in @@ -0,0 +1,10 @@ +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libdir=@libdir@ +includedir=@includedir@ + +Name: @PACKAGE_NAME@ +Description: Low-latency UDP networking library supporting optional reliability +Version: @PACKAGE_VERSION@ +Cflags: -I${includedir} +Libs: -L${libdir} -lenet diff --git a/pkg/enet/enet/libtool b/pkg/enet/enet/libtool new file mode 100755 index 000000000..f4ab9baed --- /dev/null +++ b/pkg/enet/enet/libtool @@ -0,0 +1,11937 @@ +#! /bin/sh +# Generated automatically by config.status (libenet) 1.3.18 +# Libtool was configured on host Air-Caleb-2: +# NOTE: Changes made to this file will be lost: look at ltmain.sh. + +# Provide generalized library-building support services. +# Written by Gordon Matzigkeit, 1996 + +# Copyright (C) 2014 Free Software Foundation, Inc. +# This is free software; see the source for copying conditions. There is NO +# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +# GNU Libtool is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of of the License, or +# (at your option) any later version. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program or library that is built +# using GNU Libtool, you may include this file under the same +# distribution terms that you use for the rest of that program. +# +# GNU Libtool is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +# The names of the tagged configurations supported by this script. +available_tags='' + +# Configured defaults for sys_lib_dlsearch_path munging. +: ${LT_SYS_LIBRARY_PATH=""} + +# ### BEGIN LIBTOOL CONFIG + +# Which release of libtool.m4 was used? +macro_version=2.4.7 +macro_revision=2.4.7 + +# Whether or not to build shared libraries. +build_libtool_libs=yes + +# Whether or not to build static libraries. +build_old_libs=yes + +# What type of objects to build. +pic_mode=default + +# Whether or not to optimize for fast installation. +fast_install=needless + +# Shared archive member basename,for filename based shared library versioning on AIX. +shared_archive_member_spec= + +# Shell to use when invoking shell scripts. +SHELL="/bin/sh" + +# An echo program that protects backslashes. 
+ECHO="printf %s\\n" + +# The PATH separator for the build system. +PATH_SEPARATOR=":" + +# The host system. +host_alias= +host=aarch64-apple-darwin22.1.0 +host_os=darwin22.1.0 + +# The build system. +build_alias= +build=aarch64-apple-darwin22.1.0 +build_os=darwin22.1.0 + +# A sed program that does not truncate output. +SED="/usr/bin/sed" + +# Sed that helps us avoid accidentally triggering echo(1) options like -n. +Xsed="$SED -e 1s/^X//" + +# A grep program that handles long lines. +GREP="/usr/bin/grep" + +# An ERE matcher. +EGREP="/usr/bin/grep -E" + +# A literal string matcher. +FGREP="/usr/bin/grep -F" + +# A BSD- or MS-compatible name lister. +NM="/usr/bin/nm -B" + +# Whether we need soft or hard links. +LN_S="ln -s" + +# What is the maximum length of a command? +max_cmd_len=786432 + +# Object file suffix (normally "o"). +objext=o + +# Executable file suffix (normally ""). +exeext= + +# whether the shell understands "unset". +lt_unset=unset + +# turn spaces into newlines. +SP2NL="tr \\040 \\012" + +# turn newlines into spaces. +NL2SP="tr \\015\\012 \\040\\040" + +# convert $build file names to $host format. +to_host_file_cmd=func_convert_file_noop + +# convert $build files to toolchain format. +to_tool_file_cmd=func_convert_file_noop + +# A file(cmd) program that detects file types. +FILECMD="file" + +# An object symbol dumper. +OBJDUMP="objdump" + +# Method to check whether dependent libraries are shared objects. +deplibs_check_method="pass_all" + +# Command to use when deplibs_check_method = "file_magic". +file_magic_cmd="\$MAGIC_CMD" + +# How to find potential files when deplibs_check_method = "file_magic". +file_magic_glob="" + +# Find potential files using nocaseglob when deplibs_check_method = "file_magic". +want_nocaseglob="no" + +# DLL creation program. +DLLTOOL="false" + +# Command to associate shared and link libraries. +sharedlib_from_linklib_cmd="printf %s\\n" + +# The archiver. +AR="ar" + +# Flags to create an archive (by configure). +lt_ar_flags=cr + +# Flags to create an archive. +AR_FLAGS=${ARFLAGS-"$lt_ar_flags"} + +# How to feed a file listing to the archiver. +archiver_list_spec="" + +# A symbol stripping program. +STRIP="strip" + +# Commands used to install an old-style archive. +RANLIB="ranlib" +old_postinstall_cmds="chmod 644 \$oldlib~\$RANLIB \$tool_oldlib" +old_postuninstall_cmds="" + +# Whether to use a lock for old archive extraction. +lock_old_archive_extraction=yes + +# A C compiler. +LTCC="gcc" + +# LTCC compiler flags. +LTCFLAGS="-g -O2" + +# Take the output of nm and produce a listing of raw symbols and C names. +global_symbol_pipe="/usr/bin/sed -n -e 's/^.*[ ]\\([ABCDGIRSTW][ABCDGIRSTW]*\\)[ ][ ]*_\\([_A-Za-z][_A-Za-z0-9]*\\)\$/\\1 _\\2 \\2/p' | /usr/bin/sed '/ __gnu_lto/d'" + +# Transform the output of nm in a proper C declaration. +global_symbol_to_cdecl="/usr/bin/sed -n -e 's/^T .* \\(.*\\)\$/extern int \\1();/p' -e 's/^[ABCDGIRSTW][ABCDGIRSTW]* .* \\(.*\\)\$/extern char \\1;/p'" + +# Transform the output of nm into a list of symbols to manually relocate. +global_symbol_to_import="" + +# Transform the output of nm in a C name address pair. +global_symbol_to_c_name_address="/usr/bin/sed -n -e 's/^: \\(.*\\) .*\$/ {\"\\1\", (void *) 0},/p' -e 's/^[ABCDGIRSTW][ABCDGIRSTW]* .* \\(.*\\)\$/ {\"\\1\", (void *) \\&\\1},/p'" + +# Transform the output of nm in a C name address pair when lib prefix is needed. 
+global_symbol_to_c_name_address_lib_prefix="/usr/bin/sed -n -e 's/^: \\(.*\\) .*\$/ {\"\\1\", (void *) 0},/p' -e 's/^[ABCDGIRSTW][ABCDGIRSTW]* .* \\(lib.*\\)\$/ {\"\\1\", (void *) \\&\\1},/p' -e 's/^[ABCDGIRSTW][ABCDGIRSTW]* .* \\(.*\\)\$/ {\"lib\\1\", (void *) \\&\\1},/p'" + +# The name lister interface. +nm_interface="BSD nm" + +# Specify filename containing input files for $NM. +nm_file_list_spec="@" + +# The root where to search for dependent libraries,and where our libraries should be installed. +lt_sysroot= + +# Command to truncate a binary pipe. +lt_truncate_bin="/bin/dd bs=4096 count=1" + +# The name of the directory that contains temporary libtool files. +objdir=.libs + +# Used to examine libraries when file_magic_cmd begins with "file". +MAGIC_CMD=file + +# Must we lock files when doing compilation? +need_locks="no" + +# Manifest tool. +MANIFEST_TOOL=":" + +# Tool to manipulate archived DWARF debug symbol files on Mac OS X. +DSYMUTIL="dsymutil" + +# Tool to change global to local symbols on Mac OS X. +NMEDIT="nmedit" + +# Tool to manipulate fat objects and archives on Mac OS X. +LIPO="lipo" + +# ldd/readelf like tool for Mach-O binaries on Mac OS X. +OTOOL="otool" + +# ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4. +OTOOL64=":" + +# Old archive suffix (normally "a"). +libext=a + +# Shared library suffix (normally ".so"). +shrext_cmds="\`test .\$module = .yes && echo .so || echo .dylib\`" + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds="" + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at link time. +variables_saved_for_relink="PATH DYLD_LIBRARY_PATH GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" + +# Do we need the "lib" prefix for modules? +need_lib_prefix=no + +# Do we need a version for libraries? +need_version=no + +# Library versioning type. +version_type=darwin + +# Shared library runtime path variable. +runpath_var= + +# Shared library path variable. +shlibpath_var=DYLD_LIBRARY_PATH + +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=yes + +# Format of library name prefix. +libname_spec="lib\$name" + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME +library_names_spec="\$libname\$release\$major\$shared_ext \$libname\$shared_ext" + +# The coded name of the library, if different from the real name. +soname_spec="\$libname\$release\$major\$shared_ext" + +# Permission mode override for installation of shared libraries. +install_override_mode="" + +# Command to use after installation of a shared archive. +postinstall_cmds="" + +# Command to use after uninstallation of a shared archive. +postuninstall_cmds="" + +# Commands used to finish a libtool library installation in a directory. +finish_cmds="" + +# As "finish_cmds", except a single script fragment to be evaled but +# not shown. +finish_eval="" + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=no + +# Compile-time system search path for libraries. +sys_lib_search_path_spec="/Library/Developer/CommandLineTools/usr/lib/clang/14.0.3 /usr/local/lib" + +# Detected run-time system search path for libraries. +sys_lib_dlsearch_path_spec="/usr/local/lib /lib /usr/lib" + +# Explicit LT_SYS_LIBRARY_PATH set during ./configure time. +configure_time_lt_sys_library_path="" + +# Whether dlopen is supported. 
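The darwin versioning block above controls how shared library file names are produced on this host. A rough sketch of the expansion libtool performs for these specs; the major version used here is purely illustrative and not taken from this build:

#!/bin/sh
# Hypothetical inputs; libtool derives them from the library's -version-info.
libname=libenet release= major=.7 module=no

# shrext_cmds: loadable modules get .so, ordinary shared libraries get .dylib.
shared_ext=`test .$module = .yes && echo .so || echo .dylib`

# library_names_spec -> real file name plus the unversioned link name.
echo "$libname$release$major$shared_ext $libname$shared_ext"   # libenet.7.dylib libenet.dylib
# soname_spec -> the install name recorded in the binary.
echo "$libname$release$major$shared_ext"                       # libenet.7.dylib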
+dlopen_support=unknown + +# Whether dlopen of programs is supported. +dlopen_self=unknown + +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=unknown + +# Commands to strip libraries. +old_striplib="strip -S" +striplib="strip -x" + + +# The linker used to build libraries. +LD="/Library/Developer/CommandLineTools/usr/bin/ld" + +# How to create reloadable object files. +reload_flag=" -r" +reload_cmds="\$LTCC \$LTCFLAGS -nostdlib \$wl-r -o \$output\$reload_objs" + +# Commands used to build an old-style archive. +old_archive_cmds="\$AR \$AR_FLAGS \$oldlib\$oldobjs~\$RANLIB \$tool_oldlib" + +# A language specific compiler. +CC="gcc" + +# Is the compiler the GNU compiler? +with_gcc=yes + +# Compiler flag to turn off builtin functions. +no_builtin_flag=" -fno-builtin -fno-rtti -fno-exceptions" + +# Additional compiler flags for building library objects. +pic_flag=" -fno-common -DPIC" + +# How to pass a linker flag through the compiler. +wl="-Wl," + +# Compiler flag to prevent dynamic linking. +link_static_flag="" + +# Does compiler simultaneously support -c and -o options? +compiler_c_o="yes" + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=no + +# Whether or not to disallow shared libs when runtime libs are static. +allow_libtool_libs_with_static_runtimes=no + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec="" + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec="\`for conv in \$convenience\\\"\\\"; do test -n \\\"\$conv\\\" && new_convenience=\\\"\$new_convenience \$wl-force_load,\$conv\\\"; done; func_echo_all \\\"\$new_convenience\\\"\`" + +# Whether the compiler copes with passing no objects directly. +compiler_needs_object="no" + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds="" + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds="" + +# Commands used to build a shared archive. +archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring \$single_module" +archive_expsym_cmds="/usr/bin/sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring \$single_module \$wl-exported_symbols_list,\$output_objdir/\$libname-symbols.expsym" + +# Commands used to build a loadable module if different from building +# a shared archive. +module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags" +module_expsym_cmds="/usr/bin/sed -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags \$wl-exported_symbols_list,\$output_objdir/\$libname-symbols.expsym" + +# Whether we are building with GNU ld or not. +with_gnu_ld="no" + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag="\$wl-undefined \${wl}dynamic_lookup" + +# Flag that enforces no undefined symbols. +no_undefined_flag="" + +# Flag to hardcode $libdir into a binary during linking. +# This must work even if $libdir does not exist +hardcode_libdir_flag_spec="" + +# Whether we need a single "-rpath" flag with a separated argument. 
+hardcode_libdir_separator="" + +# Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes +# DIR into the resulting binary. +hardcode_direct=no + +# Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes +# DIR into the resulting binary and the resulting library dependency is +# "absolute",i.e impossible to change by setting $shlibpath_var if the +# library is relocated. +hardcode_direct_absolute=no + +# Set to "yes" if using the -LDIR flag during linking hardcodes DIR +# into the resulting binary. +hardcode_minus_L=no + +# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR +# into the resulting binary. +hardcode_shlibpath_var=unsupported + +# Set to "yes" if building a shared library automatically hardcodes DIR +# into the library and all subsequent libraries and executables linked +# against it. +hardcode_automatic=yes + +# Set to yes if linker adds runtime paths of dependent libraries +# to runtime path list. +inherit_rpath=no + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=yes + +# Set to "yes" if exported symbols are required. +always_export_symbols=no + +# The commands to list exported symbols. +export_symbols_cmds="\$NM \$libobjs \$convenience | \$global_symbol_pipe | \$SED 's/.* //' | sort | uniq > \$export_symbols" + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms="_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*" + +# Symbols that must always be exported. +include_expsyms="" + +# Commands necessary for linking programs (against libraries) with templates. +prelink_cmds="" + +# Commands necessary for finishing linking programs. +postlink_cmds="" + +# Specify filename containing input files. +file_list_spec="" + +# How to hardcode a shared library path into an executable. +hardcode_action=immediate + +# ### END LIBTOOL CONFIG + + +# ### BEGIN FUNCTIONS SHARED WITH CONFIGURE + +# func_munge_path_list VARIABLE PATH +# ----------------------------------- +# VARIABLE is name of variable containing _space_ separated list of +# directories to be munged by the contents of PATH, which is string +# having a format: +# "DIR[:DIR]:" +# string "DIR[ DIR]" will be prepended to VARIABLE +# ":DIR[:DIR]" +# string "DIR[ DIR]" will be appended to VARIABLE +# "DIRP[:DIRP]::[DIRA:]DIRA" +# string "DIRP[ DIRP]" will be prepended to VARIABLE and string +# "DIRA[ DIRA]" will be appended to VARIABLE +# "DIR[:DIR]" +# VARIABLE will be replaced by "DIR[ DIR]" +func_munge_path_list () +{ + case x$2 in + x) + ;; + *:) + eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\" + ;; + x:*) + eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\" + ;; + *::*) + eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\" + eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\" + ;; + *) + eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\" + ;; + esac +} + + +# Calculate cc_basename. Skip known compiler wrappers and cross-prefix. +func_cc_basename () +{ + for cc_temp in $*""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac + done + func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` +} + + +# ### END FUNCTIONS SHARED WITH CONFIGURE + +#! /usr/bin/env sh +## DO NOT EDIT - This file generated from ./build-aux/ltmain.in +## by inline-source v2019-02-19.15 + +# libtool (GNU libtool) 2.4.7 +# Provide generalized library-building support services. 
+# Written by Gordon Matzigkeit , 1996 + +# Copyright (C) 1996-2019, 2021-2022 Free Software Foundation, Inc. +# This is free software; see the source for copying conditions. There is NO +# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +# GNU Libtool is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# As a special exception to the GNU General Public License, +# if you distribute this file as part of a program or library that +# is built using GNU Libtool, you may include this file under the +# same distribution terms that you use for the rest of that program. +# +# GNU Libtool is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +PROGRAM=libtool +PACKAGE=libtool +VERSION=2.4.7 +package_revision=2.4.7 + + +## ------ ## +## Usage. ## +## ------ ## + +# Run './libtool --help' for help with using this script from the +# command line. + + +## ------------------------------- ## +## User overridable command paths. ## +## ------------------------------- ## + +# After configure completes, it has a better idea of some of the +# shell tools we need than the defaults used by the functions shared +# with bootstrap, so set those here where they can still be over- +# ridden by the user, but otherwise take precedence. + +: ${AUTOCONF="autoconf"} +: ${AUTOMAKE="automake"} + + +## -------------------------- ## +## Source external libraries. ## +## -------------------------- ## + +# Much of our low-level functionality needs to be sourced from external +# libraries, which are installed to $pkgauxdir. + +# Set a version string for this script. +scriptversion=2019-02-19.15; # UTC + +# General shell script boiler plate, and helper functions. +# Written by Gary V. Vaughan, 2004 + +# This is free software. There is NO warranty; not even for +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# +# Copyright (C) 2004-2019, 2021 Bootstrap Authors +# +# This file is dual licensed under the terms of the MIT license +# , and GPL version 2 or later +# . You must apply one of +# these licenses when using or redistributing this software or any of +# the files within it. See the URLs above, or the file `LICENSE` +# included in the Bootstrap distribution for the full license texts. + +# Please report bugs or propose patches to: +# + + +## ------ ## +## Usage. ## +## ------ ## + +# Evaluate this file near the top of your script to gain access to +# the functions and variables defined here: +# +# . `echo "$0" | ${SED-sed} 's|[^/]*$||'`/build-aux/funclib.sh +# +# If you need to override any of the default environment variable +# settings, do that before evaluating this file. + + +## -------------------- ## +## Shell normalisation. ## +## -------------------- ## + +# Some shells need a little help to be as Bourne compatible as possible. +# Before doing anything else, make sure all that help has been provided! 
+ +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in *posix*) set -o posix ;; esac +fi + +# NLS nuisances: We save the old values in case they are required later. +_G_user_locale= +_G_safe_locale= +for _G_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES +do + eval "if test set = \"\${$_G_var+set}\"; then + save_$_G_var=\$$_G_var + $_G_var=C + export $_G_var + _G_user_locale=\"$_G_var=\\\$save_\$_G_var; \$_G_user_locale\" + _G_safe_locale=\"$_G_var=C; \$_G_safe_locale\" + fi" +done +# These NLS vars are set unconditionally (bootstrap issue #24). Unset those +# in case the environment reset is needed later and the $save_* variant is not +# defined (see the code above). +LC_ALL=C +LANGUAGE=C +export LANGUAGE LC_ALL + +# Make sure IFS has a sensible default +sp=' ' +nl=' +' +IFS="$sp $nl" + +# There are apparently some retarded systems that use ';' as a PATH separator! +if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + +# func_unset VAR +# -------------- +# Portably unset VAR. +# In some shells, an 'unset VAR' statement leaves a non-zero return +# status if VAR is already unset, which might be problematic if the +# statement is used at the end of a function (thus poisoning its return +# value) or when 'set -e' is active (causing even a spurious abort of +# the script in this case). +func_unset () +{ + { eval $1=; (eval unset $1) >/dev/null 2>&1 && eval unset $1 || : ; } +} + + +# Make sure CDPATH doesn't cause `cd` commands to output the target dir. +func_unset CDPATH + +# Make sure ${,E,F}GREP behave sanely. +func_unset GREP_OPTIONS + + +## ------------------------- ## +## Locate command utilities. ## +## ------------------------- ## + + +# func_executable_p FILE +# ---------------------- +# Check that FILE is an executable regular file. +func_executable_p () +{ + test -f "$1" && test -x "$1" +} + + +# func_path_progs PROGS_LIST CHECK_FUNC [PATH] +# -------------------------------------------- +# Search for either a program that responds to --version with output +# containing "GNU", or else returned by CHECK_FUNC otherwise, by +# trying all the directories in PATH with each of the elements of +# PROGS_LIST. +# +# CHECK_FUNC should accept the path to a candidate program, and +# set $func_check_prog_result if it truncates its output less than +# $_G_path_prog_max characters. +func_path_progs () +{ + _G_progs_list=$1 + _G_check_func=$2 + _G_PATH=${3-"$PATH"} + + _G_path_prog_max=0 + _G_path_prog_found=false + _G_save_IFS=$IFS; IFS=${PATH_SEPARATOR-:} + for _G_dir in $_G_PATH; do + IFS=$_G_save_IFS + test -z "$_G_dir" && _G_dir=. 
+ for _G_prog_name in $_G_progs_list; do + for _exeext in '' .EXE; do + _G_path_prog=$_G_dir/$_G_prog_name$_exeext + func_executable_p "$_G_path_prog" || continue + case `"$_G_path_prog" --version 2>&1` in + *GNU*) func_path_progs_result=$_G_path_prog _G_path_prog_found=: ;; + *) $_G_check_func $_G_path_prog + func_path_progs_result=$func_check_prog_result + ;; + esac + $_G_path_prog_found && break 3 + done + done + done + IFS=$_G_save_IFS + test -z "$func_path_progs_result" && { + echo "no acceptable sed could be found in \$PATH" >&2 + exit 1 + } +} + + +# We want to be able to use the functions in this file before configure +# has figured out where the best binaries are kept, which means we have +# to search for them ourselves - except when the results are already set +# where we skip the searches. + +# Unless the user overrides by setting SED, search the path for either GNU +# sed, or the sed that truncates its output the least. +test -z "$SED" && { + _G_sed_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ + for _G_i in 1 2 3 4 5 6 7; do + _G_sed_script=$_G_sed_script$nl$_G_sed_script + done + echo "$_G_sed_script" 2>/dev/null | sed 99q >conftest.sed + _G_sed_script= + + func_check_prog_sed () + { + _G_path_prog=$1 + + _G_count=0 + printf 0123456789 >conftest.in + while : + do + cat conftest.in conftest.in >conftest.tmp + mv conftest.tmp conftest.in + cp conftest.in conftest.nl + echo '' >> conftest.nl + "$_G_path_prog" -f conftest.sed conftest.out 2>/dev/null || break + diff conftest.out conftest.nl >/dev/null 2>&1 || break + _G_count=`expr $_G_count + 1` + if test "$_G_count" -gt "$_G_path_prog_max"; then + # Best one so far, save it but keep looking for a better one + func_check_prog_result=$_G_path_prog + _G_path_prog_max=$_G_count + fi + # 10*(2^10) chars as input seems more than enough + test 10 -lt "$_G_count" && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out + } + + func_path_progs "sed gsed" func_check_prog_sed "$PATH:/usr/xpg4/bin" + rm -f conftest.sed + SED=$func_path_progs_result +} + + +# Unless the user overrides by setting GREP, search the path for either GNU +# grep, or the grep that truncates its output the least. +test -z "$GREP" && { + func_check_prog_grep () + { + _G_path_prog=$1 + + _G_count=0 + _G_path_prog_max=0 + printf 0123456789 >conftest.in + while : + do + cat conftest.in conftest.in >conftest.tmp + mv conftest.tmp conftest.in + cp conftest.in conftest.nl + echo 'GREP' >> conftest.nl + "$_G_path_prog" -e 'GREP$' -e '-(cannot match)-' conftest.out 2>/dev/null || break + diff conftest.out conftest.nl >/dev/null 2>&1 || break + _G_count=`expr $_G_count + 1` + if test "$_G_count" -gt "$_G_path_prog_max"; then + # Best one so far, save it but keep looking for a better one + func_check_prog_result=$_G_path_prog + _G_path_prog_max=$_G_count + fi + # 10*(2^10) chars as input seems more than enough + test 10 -lt "$_G_count" && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out + } + + func_path_progs "grep ggrep" func_check_prog_grep "$PATH:/usr/xpg4/bin" + GREP=$func_path_progs_result +} + + +## ------------------------------- ## +## User overridable command paths. ## +## ------------------------------- ## + +# All uppercase variable names are used for environment variables. These +# variables can be overridden by the user before calling a script that +# uses them if a suitable command of that name is not already available +# in the command search PATH. 
+ +: ${CP="cp -f"} +: ${ECHO="printf %s\n"} +: ${EGREP="$GREP -E"} +: ${FGREP="$GREP -F"} +: ${LN_S="ln -s"} +: ${MAKE="make"} +: ${MKDIR="mkdir"} +: ${MV="mv -f"} +: ${RM="rm -f"} +: ${SHELL="${CONFIG_SHELL-/bin/sh}"} + + +## -------------------- ## +## Useful sed snippets. ## +## -------------------- ## + +sed_dirname='s|/[^/]*$||' +sed_basename='s|^.*/||' + +# Sed substitution that helps us do robust quoting. It backslashifies +# metacharacters that are still active within double-quoted strings. +sed_quote_subst='s|\([`"$\\]\)|\\\1|g' + +# Same as above, but do not quote variable references. +sed_double_quote_subst='s/\(["`\\]\)/\\\1/g' + +# Sed substitution that turns a string into a regex matching for the +# string literally. +sed_make_literal_regex='s|[].[^$\\*\/]|\\&|g' + +# Sed substitution that converts a w32 file name or path +# that contains forward slashes, into one that contains +# (escaped) backslashes. A very naive implementation. +sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g' + +# Re-'\' parameter expansions in output of sed_double_quote_subst that +# were '\'-ed in input to the same. If an odd number of '\' preceded a +# '$' in input to sed_double_quote_subst, that '$' was protected from +# expansion. Since each input '\' is now two '\'s, look for any number +# of runs of four '\'s followed by two '\'s and then a '$'. '\' that '$'. +_G_bs='\\' +_G_bs2='\\\\' +_G_bs4='\\\\\\\\' +_G_dollar='\$' +sed_double_backslash="\ + s/$_G_bs4/&\\ +/g + s/^$_G_bs2$_G_dollar/$_G_bs&/ + s/\\([^$_G_bs]\\)$_G_bs2$_G_dollar/\\1$_G_bs2$_G_bs$_G_dollar/g + s/\n//g" + +# require_check_ifs_backslash +# --------------------------- +# Check if we can use backslash as IFS='\' separator, and set +# $check_ifs_backshlash_broken to ':' or 'false'. +require_check_ifs_backslash=func_require_check_ifs_backslash +func_require_check_ifs_backslash () +{ + _G_save_IFS=$IFS + IFS='\' + _G_check_ifs_backshlash='a\\b' + for _G_i in $_G_check_ifs_backshlash + do + case $_G_i in + a) + check_ifs_backshlash_broken=false + ;; + '') + break + ;; + *) + check_ifs_backshlash_broken=: + break + ;; + esac + done + IFS=$_G_save_IFS + require_check_ifs_backslash=: +} + + +## ----------------- ## +## Global variables. ## +## ----------------- ## + +# Except for the global variables explicitly listed below, the following +# functions in the '^func_' namespace, and the '^require_' namespace +# variables initialised in the 'Resource management' section, sourcing +# this file will not pollute your global namespace with anything +# else. There's no portable way to scope variables in Bourne shell +# though, so actually running these functions will sometimes place +# results into a variable named after the function, and often use +# temporary variables in the '^_G_' namespace. If you are careful to +# avoid using those namespaces casually in your sourcing script, things +# should continue to work as you expect. And, of course, you can freely +# overwrite any of the functions or variables defined here before +# calling anything to customize them. + +EXIT_SUCCESS=0 +EXIT_FAILURE=1 +EXIT_MISMATCH=63 # $? = 63 is used to indicate version mismatch to missing. +EXIT_SKIP=77 # $? = 77 is used to indicate a skipped test to automake. 
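The ": ${VAR="value"}" assignments near the top of this block rely on POSIX default-value expansion: the default is taken only when the variable is unset, so anything exported by the caller wins. A small standalone illustration of the same idiom, using hypothetical variable names:

#!/bin/sh
: ${GREETING="hello"}   # kept if the caller exported GREETING
: ${TARGET="world"}     # otherwise the default applies
printf '%s, %s\n' "$GREETING" "$TARGET"

# $ ./demo.sh                 -> hello, world
# $ GREETING=hi ./demo.sh     -> hi, world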
+ +# Allow overriding, eg assuming that you follow the convention of +# putting '$debug_cmd' at the start of all your functions, you can get +# bash to show function call trace with: +# +# debug_cmd='eval echo "${FUNCNAME[0]} $*" >&2' bash your-script-name +debug_cmd=${debug_cmd-":"} +exit_cmd=: + +# By convention, finish your script with: +# +# exit $exit_status +# +# so that you can set exit_status to non-zero if you want to indicate +# something went wrong during execution without actually bailing out at +# the point of failure. +exit_status=$EXIT_SUCCESS + +# Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh +# is ksh but when the shell is invoked as "sh" and the current value of +# the _XPG environment variable is not equal to 1 (one), the special +# positional parameter $0, within a function call, is the name of the +# function. +progpath=$0 + +# The name of this program. +progname=`$ECHO "$progpath" |$SED "$sed_basename"` + +# Make sure we have an absolute progpath for reexecution: +case $progpath in + [\\/]*|[A-Za-z]:\\*) ;; + *[\\/]*) + progdir=`$ECHO "$progpath" |$SED "$sed_dirname"` + progdir=`cd "$progdir" && pwd` + progpath=$progdir/$progname + ;; + *) + _G_IFS=$IFS + IFS=${PATH_SEPARATOR-:} + for progdir in $PATH; do + IFS=$_G_IFS + test -x "$progdir/$progname" && break + done + IFS=$_G_IFS + test -n "$progdir" || progdir=`pwd` + progpath=$progdir/$progname + ;; +esac + + +## ----------------- ## +## Standard options. ## +## ----------------- ## + +# The following options affect the operation of the functions defined +# below, and should be set appropriately depending on run-time para- +# meters passed on the command line. + +opt_dry_run=false +opt_quiet=false +opt_verbose=false + +# Categories 'all' and 'none' are always available. Append any others +# you will pass as the first argument to func_warning from your own +# code. +warning_categories= + +# By default, display warnings according to 'opt_warning_types'. Set +# 'warning_func' to ':' to elide all warnings, or func_fatal_error to +# treat the next displayed warning as a fatal error. +warning_func=func_warn_and_continue + +# Set to 'all' to display all warnings, 'none' to suppress all +# warnings, or a space delimited list of some subset of +# 'warning_categories' to display only the listed warnings. +opt_warning_types=all + + +## -------------------- ## +## Resource management. ## +## -------------------- ## + +# This section contains definitions for functions that each ensure a +# particular resource (a file, or a non-empty configuration variable for +# example) is available, and if appropriate to extract default values +# from pertinent package files. Call them using their associated +# 'require_*' variable to ensure that they are executed, at most, once. +# +# It's entirely deliberate that calling these functions can set +# variables that don't obey the namespace limitations obeyed by the rest +# of this file, in order that that they be as useful as possible to +# callers. + + +# require_term_colors +# ------------------- +# Allow display of bold text on terminals that support it. +require_term_colors=func_require_term_colors +func_require_term_colors () +{ + $debug_cmd + + test -t 1 && { + # COLORTERM and USE_ANSI_COLORS environment variables take + # precedence, because most terminfo databases neglect to describe + # whether color sequences are supported. 
+ test -n "${COLORTERM+set}" && : ${USE_ANSI_COLORS="1"} + + if test 1 = "$USE_ANSI_COLORS"; then + # Standard ANSI escape sequences + tc_reset='' + tc_bold=''; tc_standout='' + tc_red=''; tc_green='' + tc_blue=''; tc_cyan='' + else + # Otherwise trust the terminfo database after all. + test -n "`tput sgr0 2>/dev/null`" && { + tc_reset=`tput sgr0` + test -n "`tput bold 2>/dev/null`" && tc_bold=`tput bold` + tc_standout=$tc_bold + test -n "`tput smso 2>/dev/null`" && tc_standout=`tput smso` + test -n "`tput setaf 1 2>/dev/null`" && tc_red=`tput setaf 1` + test -n "`tput setaf 2 2>/dev/null`" && tc_green=`tput setaf 2` + test -n "`tput setaf 4 2>/dev/null`" && tc_blue=`tput setaf 4` + test -n "`tput setaf 5 2>/dev/null`" && tc_cyan=`tput setaf 5` + } + fi + } + + require_term_colors=: +} + + +## ----------------- ## +## Function library. ## +## ----------------- ## + +# This section contains a variety of useful functions to call in your +# scripts. Take note of the portable wrappers for features provided by +# some modern shells, which will fall back to slower equivalents on +# less featureful shells. + + +# func_append VAR VALUE +# --------------------- +# Append VALUE onto the existing contents of VAR. + + # We should try to minimise forks, especially on Windows where they are + # unreasonably slow, so skip the feature probes when bash or zsh are + # being used: + if test set = "${BASH_VERSION+set}${ZSH_VERSION+set}"; then + : ${_G_HAVE_ARITH_OP="yes"} + : ${_G_HAVE_XSI_OPS="yes"} + # The += operator was introduced in bash 3.1 + case $BASH_VERSION in + [12].* | 3.0 | 3.0*) ;; + *) + : ${_G_HAVE_PLUSEQ_OP="yes"} + ;; + esac + fi + + # _G_HAVE_PLUSEQ_OP + # Can be empty, in which case the shell is probed, "yes" if += is + # useable or anything else if it does not work. + test -z "$_G_HAVE_PLUSEQ_OP" \ + && (eval 'x=a; x+=" b"; test "a b" = "$x"') 2>/dev/null \ + && _G_HAVE_PLUSEQ_OP=yes + +if test yes = "$_G_HAVE_PLUSEQ_OP" +then + # This is an XSI compatible shell, allowing a faster implementation... + eval 'func_append () + { + $debug_cmd + + eval "$1+=\$2" + }' +else + # ...otherwise fall back to using expr, which is often a shell builtin. + func_append () + { + $debug_cmd + + eval "$1=\$$1\$2" + } +fi + + +# func_append_quoted VAR VALUE +# ---------------------------- +# Quote VALUE and append to the end of shell variable VAR, separated +# by a space. +if test yes = "$_G_HAVE_PLUSEQ_OP"; then + eval 'func_append_quoted () + { + $debug_cmd + + func_quote_arg pretty "$2" + eval "$1+=\\ \$func_quote_arg_result" + }' +else + func_append_quoted () + { + $debug_cmd + + func_quote_arg pretty "$2" + eval "$1=\$$1\\ \$func_quote_arg_result" + } +fi + + +# func_append_uniq VAR VALUE +# -------------------------- +# Append unique VALUE onto the existing contents of VAR, assuming +# entries are delimited by the first character of VALUE. For example: +# +# func_append_uniq options " --another-option option-argument" +# +# will only append to $options if " --another-option option-argument " +# is not already present somewhere in $options already (note spaces at +# each end implied by leading space in second argument). +func_append_uniq () +{ + $debug_cmd + + eval _G_current_value='`$ECHO $'$1'`' + _G_delim=`expr "$2" : '\(.\)'` + + case $_G_delim$_G_current_value$_G_delim in + *"$2$_G_delim"*) ;; + *) func_append "$@" ;; + esac +} + + +# func_arith TERM... +# ------------------ +# Set func_arith_result to the result of evaluating TERMs. 
+ test -z "$_G_HAVE_ARITH_OP" \ + && (eval 'test 2 = $(( 1 + 1 ))') 2>/dev/null \ + && _G_HAVE_ARITH_OP=yes + +if test yes = "$_G_HAVE_ARITH_OP"; then + eval 'func_arith () + { + $debug_cmd + + func_arith_result=$(( $* )) + }' +else + func_arith () + { + $debug_cmd + + func_arith_result=`expr "$@"` + } +fi + + +# func_basename FILE +# ------------------ +# Set func_basename_result to FILE with everything up to and including +# the last / stripped. +if test yes = "$_G_HAVE_XSI_OPS"; then + # If this shell supports suffix pattern removal, then use it to avoid + # forking. Hide the definitions single quotes in case the shell chokes + # on unsupported syntax... + _b='func_basename_result=${1##*/}' + _d='case $1 in + */*) func_dirname_result=${1%/*}$2 ;; + * ) func_dirname_result=$3 ;; + esac' + +else + # ...otherwise fall back to using sed. + _b='func_basename_result=`$ECHO "$1" |$SED "$sed_basename"`' + _d='func_dirname_result=`$ECHO "$1" |$SED "$sed_dirname"` + if test "X$func_dirname_result" = "X$1"; then + func_dirname_result=$3 + else + func_append func_dirname_result "$2" + fi' +fi + +eval 'func_basename () +{ + $debug_cmd + + '"$_b"' +}' + + +# func_dirname FILE APPEND NONDIR_REPLACEMENT +# ------------------------------------------- +# Compute the dirname of FILE. If nonempty, add APPEND to the result, +# otherwise set result to NONDIR_REPLACEMENT. +eval 'func_dirname () +{ + $debug_cmd + + '"$_d"' +}' + + +# func_dirname_and_basename FILE APPEND NONDIR_REPLACEMENT +# -------------------------------------------------------- +# Perform func_basename and func_dirname in a single function +# call: +# dirname: Compute the dirname of FILE. If nonempty, +# add APPEND to the result, otherwise set result +# to NONDIR_REPLACEMENT. +# value returned in "$func_dirname_result" +# basename: Compute filename of FILE. +# value retuned in "$func_basename_result" +# For efficiency, we do not delegate to the functions above but instead +# duplicate the functionality here. +eval 'func_dirname_and_basename () +{ + $debug_cmd + + '"$_b"' + '"$_d"' +}' + + +# func_echo ARG... +# ---------------- +# Echo program name prefixed message. +func_echo () +{ + $debug_cmd + + _G_message=$* + + func_echo_IFS=$IFS + IFS=$nl + for _G_line in $_G_message; do + IFS=$func_echo_IFS + $ECHO "$progname: $_G_line" + done + IFS=$func_echo_IFS +} + + +# func_echo_all ARG... +# -------------------- +# Invoke $ECHO with all args, space-separated. +func_echo_all () +{ + $ECHO "$*" +} + + +# func_echo_infix_1 INFIX ARG... +# ------------------------------ +# Echo program name, followed by INFIX on the first line, with any +# additional lines not showing INFIX. +func_echo_infix_1 () +{ + $debug_cmd + + $require_term_colors + + _G_infix=$1; shift + _G_indent=$_G_infix + _G_prefix="$progname: $_G_infix: " + _G_message=$* + + # Strip color escape sequences before counting printable length + for _G_tc in "$tc_reset" "$tc_bold" "$tc_standout" "$tc_red" "$tc_green" "$tc_blue" "$tc_cyan" + do + test -n "$_G_tc" && { + _G_esc_tc=`$ECHO "$_G_tc" | $SED "$sed_make_literal_regex"` + _G_indent=`$ECHO "$_G_indent" | $SED "s|$_G_esc_tc||g"` + } + done + _G_indent="$progname: "`echo "$_G_indent" | $SED 's|.| |g'`" " ## exclude from sc_prohibit_nested_quotes + + func_echo_infix_1_IFS=$IFS + IFS=$nl + for _G_line in $_G_message; do + IFS=$func_echo_infix_1_IFS + $ECHO "$_G_prefix$tc_bold$_G_line$tc_reset" >&2 + _G_prefix=$_G_indent + done + IFS=$func_echo_infix_1_IFS +} + + +# func_error ARG... 
+# ----------------- +# Echo program name prefixed message to standard error. +func_error () +{ + $debug_cmd + + $require_term_colors + + func_echo_infix_1 " $tc_standout${tc_red}error$tc_reset" "$*" >&2 +} + + +# func_fatal_error ARG... +# ----------------------- +# Echo program name prefixed message to standard error, and exit. +func_fatal_error () +{ + $debug_cmd + + func_error "$*" + exit $EXIT_FAILURE +} + + +# func_grep EXPRESSION FILENAME +# ----------------------------- +# Check whether EXPRESSION matches any line of FILENAME, without output. +func_grep () +{ + $debug_cmd + + $GREP "$1" "$2" >/dev/null 2>&1 +} + + +# func_len STRING +# --------------- +# Set func_len_result to the length of STRING. STRING may not +# start with a hyphen. + test -z "$_G_HAVE_XSI_OPS" \ + && (eval 'x=a/b/c; + test 5aa/bb/cc = "${#x}${x%%/*}${x%/*}${x#*/}${x##*/}"') 2>/dev/null \ + && _G_HAVE_XSI_OPS=yes + +if test yes = "$_G_HAVE_XSI_OPS"; then + eval 'func_len () + { + $debug_cmd + + func_len_result=${#1} + }' +else + func_len () + { + $debug_cmd + + func_len_result=`expr "$1" : ".*" 2>/dev/null || echo $max_cmd_len` + } +fi + + +# func_mkdir_p DIRECTORY-PATH +# --------------------------- +# Make sure the entire path to DIRECTORY-PATH is available. +func_mkdir_p () +{ + $debug_cmd + + _G_directory_path=$1 + _G_dir_list= + + if test -n "$_G_directory_path" && test : != "$opt_dry_run"; then + + # Protect directory names starting with '-' + case $_G_directory_path in + -*) _G_directory_path=./$_G_directory_path ;; + esac + + # While some portion of DIR does not yet exist... + while test ! -d "$_G_directory_path"; do + # ...make a list in topmost first order. Use a colon delimited + # list incase some portion of path contains whitespace. + _G_dir_list=$_G_directory_path:$_G_dir_list + + # If the last portion added has no slash in it, the list is done + case $_G_directory_path in */*) ;; *) break ;; esac + + # ...otherwise throw away the child directory and loop + _G_directory_path=`$ECHO "$_G_directory_path" | $SED -e "$sed_dirname"` + done + _G_dir_list=`$ECHO "$_G_dir_list" | $SED 's|:*$||'` + + func_mkdir_p_IFS=$IFS; IFS=: + for _G_dir in $_G_dir_list; do + IFS=$func_mkdir_p_IFS + # mkdir can fail with a 'File exist' error if two processes + # try to create one of the directories concurrently. Don't + # stop in that case! + $MKDIR "$_G_dir" 2>/dev/null || : + done + IFS=$func_mkdir_p_IFS + + # Bail out if we (or some other process) failed to create a directory. + test -d "$_G_directory_path" || \ + func_fatal_error "Failed to create '$1'" + fi +} + + +# func_mktempdir [BASENAME] +# ------------------------- +# Make a temporary directory that won't clash with other running +# libtool processes, and avoids race conditions if possible. If +# given, BASENAME is the basename for that directory. +func_mktempdir () +{ + $debug_cmd + + _G_template=${TMPDIR-/tmp}/${1-$progname} + + if test : = "$opt_dry_run"; then + # Return a directory name, but don't create it in dry-run mode + _G_tmpdir=$_G_template-$$ + else + + # If mktemp works, use that first and foremost + _G_tmpdir=`mktemp -d "$_G_template-XXXXXXXX" 2>/dev/null` + + if test ! 
-d "$_G_tmpdir"; then + # Failing that, at least try and use $RANDOM to avoid a race + _G_tmpdir=$_G_template-${RANDOM-0}$$ + + func_mktempdir_umask=`umask` + umask 0077 + $MKDIR "$_G_tmpdir" + umask $func_mktempdir_umask + fi + + # If we're not in dry-run mode, bomb out on failure + test -d "$_G_tmpdir" || \ + func_fatal_error "cannot create temporary directory '$_G_tmpdir'" + fi + + $ECHO "$_G_tmpdir" +} + + +# func_normal_abspath PATH +# ------------------------ +# Remove doubled-up and trailing slashes, "." path components, +# and cancel out any ".." path components in PATH after making +# it an absolute path. +func_normal_abspath () +{ + $debug_cmd + + # These SED scripts presuppose an absolute path with a trailing slash. + _G_pathcar='s|^/\([^/]*\).*$|\1|' + _G_pathcdr='s|^/[^/]*||' + _G_removedotparts=':dotsl + s|/\./|/|g + t dotsl + s|/\.$|/|' + _G_collapseslashes='s|/\{1,\}|/|g' + _G_finalslash='s|/*$|/|' + + # Start from root dir and reassemble the path. + func_normal_abspath_result= + func_normal_abspath_tpath=$1 + func_normal_abspath_altnamespace= + case $func_normal_abspath_tpath in + "") + # Empty path, that just means $cwd. + func_stripname '' '/' "`pwd`" + func_normal_abspath_result=$func_stripname_result + return + ;; + # The next three entries are used to spot a run of precisely + # two leading slashes without using negated character classes; + # we take advantage of case's first-match behaviour. + ///*) + # Unusual form of absolute path, do nothing. + ;; + //*) + # Not necessarily an ordinary path; POSIX reserves leading '//' + # and for example Cygwin uses it to access remote file shares + # over CIFS/SMB, so we conserve a leading double slash if found. + func_normal_abspath_altnamespace=/ + ;; + /*) + # Absolute path, do nothing. + ;; + *) + # Relative path, prepend $cwd. + func_normal_abspath_tpath=`pwd`/$func_normal_abspath_tpath + ;; + esac + + # Cancel out all the simple stuff to save iterations. We also want + # the path to end with a slash for ease of parsing, so make sure + # there is one (and only one) here. + func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \ + -e "$_G_removedotparts" -e "$_G_collapseslashes" -e "$_G_finalslash"` + while :; do + # Processed it all yet? + if test / = "$func_normal_abspath_tpath"; then + # If we ascended to the root using ".." the result may be empty now. + if test -z "$func_normal_abspath_result"; then + func_normal_abspath_result=/ + fi + break + fi + func_normal_abspath_tcomponent=`$ECHO "$func_normal_abspath_tpath" | $SED \ + -e "$_G_pathcar"` + func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \ + -e "$_G_pathcdr"` + # Figure out what to do with it + case $func_normal_abspath_tcomponent in + "") + # Trailing empty path component, ignore it. + ;; + ..) + # Parent dir; strip last assembled component from result. + func_dirname "$func_normal_abspath_result" + func_normal_abspath_result=$func_dirname_result + ;; + *) + # Actual path component, append it. + func_append func_normal_abspath_result "/$func_normal_abspath_tcomponent" + ;; + esac + done + # Restore leading double-slash if one was found on entry. + func_normal_abspath_result=$func_normal_abspath_altnamespace$func_normal_abspath_result +} + + +# func_notquiet ARG... +# -------------------- +# Echo program name prefixed message only when not in quiet mode. 
+func_notquiet () +{ + $debug_cmd + + $opt_quiet || func_echo ${1+"$@"} + + # A bug in bash halts the script if the last line of a function + # fails when set -e is in force, so we need another command to + # work around that: + : +} + + +# func_relative_path SRCDIR DSTDIR +# -------------------------------- +# Set func_relative_path_result to the relative path from SRCDIR to DSTDIR. +func_relative_path () +{ + $debug_cmd + + func_relative_path_result= + func_normal_abspath "$1" + func_relative_path_tlibdir=$func_normal_abspath_result + func_normal_abspath "$2" + func_relative_path_tbindir=$func_normal_abspath_result + + # Ascend the tree starting from libdir + while :; do + # check if we have found a prefix of bindir + case $func_relative_path_tbindir in + $func_relative_path_tlibdir) + # found an exact match + func_relative_path_tcancelled= + break + ;; + $func_relative_path_tlibdir*) + # found a matching prefix + func_stripname "$func_relative_path_tlibdir" '' "$func_relative_path_tbindir" + func_relative_path_tcancelled=$func_stripname_result + if test -z "$func_relative_path_result"; then + func_relative_path_result=. + fi + break + ;; + *) + func_dirname $func_relative_path_tlibdir + func_relative_path_tlibdir=$func_dirname_result + if test -z "$func_relative_path_tlibdir"; then + # Have to descend all the way to the root! + func_relative_path_result=../$func_relative_path_result + func_relative_path_tcancelled=$func_relative_path_tbindir + break + fi + func_relative_path_result=../$func_relative_path_result + ;; + esac + done + + # Now calculate path; take care to avoid doubling-up slashes. + func_stripname '' '/' "$func_relative_path_result" + func_relative_path_result=$func_stripname_result + func_stripname '/' '/' "$func_relative_path_tcancelled" + if test -n "$func_stripname_result"; then + func_append func_relative_path_result "/$func_stripname_result" + fi + + # Normalisation. If bindir is libdir, return '.' else relative path. + if test -n "$func_relative_path_result"; then + func_stripname './' '' "$func_relative_path_result" + func_relative_path_result=$func_stripname_result + fi + + test -n "$func_relative_path_result" || func_relative_path_result=. + + : +} + + +# func_quote_portable EVAL ARG +# ---------------------------- +# Internal function to portably implement func_quote_arg. Note that we still +# keep attention to performance here so we as much as possible try to avoid +# calling sed binary (so far O(N) complexity as long as func_append is O(1)). +func_quote_portable () +{ + $debug_cmd + + $require_check_ifs_backslash + + func_quote_portable_result=$2 + + # one-time-loop (easy break) + while true + do + if $1; then + func_quote_portable_result=`$ECHO "$2" | $SED \ + -e "$sed_double_quote_subst" -e "$sed_double_backslash"` + break + fi + + # Quote for eval. + case $func_quote_portable_result in + *[\\\`\"\$]*) + # Fallback to sed for $func_check_bs_ifs_broken=:, or when the string + # contains the shell wildcard characters. 
+ case $check_ifs_backshlash_broken$func_quote_portable_result in + :*|*[\[\*\?]*) + func_quote_portable_result=`$ECHO "$func_quote_portable_result" \ + | $SED "$sed_quote_subst"` + break + ;; + esac + + func_quote_portable_old_IFS=$IFS + for _G_char in '\' '`' '"' '$' + do + # STATE($1) PREV($2) SEPARATOR($3) + set start "" "" + func_quote_portable_result=dummy"$_G_char$func_quote_portable_result$_G_char"dummy + IFS=$_G_char + for _G_part in $func_quote_portable_result + do + case $1 in + quote) + func_append func_quote_portable_result "$3$2" + set quote "$_G_part" "\\$_G_char" + ;; + start) + set first "" "" + func_quote_portable_result= + ;; + first) + set quote "$_G_part" "" + ;; + esac + done + done + IFS=$func_quote_portable_old_IFS + ;; + *) ;; + esac + break + done + + func_quote_portable_unquoted_result=$func_quote_portable_result + case $func_quote_portable_result in + # double-quote args containing shell metacharacters to delay + # word splitting, command substitution and variable expansion + # for a subsequent eval. + # many bourne shells cannot handle close brackets correctly + # in scan sets, so we specify it separately. + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + func_quote_portable_result=\"$func_quote_portable_result\" + ;; + esac +} + + +# func_quotefast_eval ARG +# ----------------------- +# Quote one ARG (internal). This is equivalent to 'func_quote_arg eval ARG', +# but optimized for speed. Result is stored in $func_quotefast_eval. +if test xyes = `(x=; printf -v x %q yes; echo x"$x") 2>/dev/null`; then + printf -v _GL_test_printf_tilde %q '~' + if test '\~' = "$_GL_test_printf_tilde"; then + func_quotefast_eval () + { + printf -v func_quotefast_eval_result %q "$1" + } + else + # Broken older Bash implementations. Make those faster too if possible. + func_quotefast_eval () + { + case $1 in + '~'*) + func_quote_portable false "$1" + func_quotefast_eval_result=$func_quote_portable_result + ;; + *) + printf -v func_quotefast_eval_result %q "$1" + ;; + esac + } + fi +else + func_quotefast_eval () + { + func_quote_portable false "$1" + func_quotefast_eval_result=$func_quote_portable_result + } +fi + + +# func_quote_arg MODEs ARG +# ------------------------ +# Quote one ARG to be evaled later. MODEs argument may contain zero or more +# specifiers listed below separated by ',' character. This function returns two +# values: +# i) func_quote_arg_result +# double-quoted (when needed), suitable for a subsequent eval +# ii) func_quote_arg_unquoted_result +# has all characters that are still active within double +# quotes backslashified. Available only if 'unquoted' is specified. +# +# Available modes: +# ---------------- +# 'eval' (default) +# - escape shell special characters +# 'expand' +# - the same as 'eval'; but do not quote variable references +# 'pretty' +# - request aesthetic output, i.e. '"a b"' instead of 'a\ b'. This might +# be used later in func_quote to get output like: 'echo "a b"' instead +# of 'echo a\ b'. This is slower than default on some shells. +# 'unquoted' +# - produce also $func_quote_arg_unquoted_result which does not contain +# wrapping double-quotes. 
+# +# Examples for 'func_quote_arg pretty,unquoted string': +# +# string | *_result | *_unquoted_result +# ------------+-----------------------+------------------- +# " | \" | \" +# a b | "a b" | a b +# "a b" | "\"a b\"" | \"a b\" +# * | "*" | * +# z="${x-$y}" | "z=\"\${x-\$y}\"" | z=\"\${x-\$y}\" +# +# Examples for 'func_quote_arg pretty,unquoted,expand string': +# +# string | *_result | *_unquoted_result +# --------------+---------------------+-------------------- +# z="${x-$y}" | "z=\"${x-$y}\"" | z=\"${x-$y}\" +func_quote_arg () +{ + _G_quote_expand=false + case ,$1, in + *,expand,*) + _G_quote_expand=: + ;; + esac + + case ,$1, in + *,pretty,*|*,expand,*|*,unquoted,*) + func_quote_portable $_G_quote_expand "$2" + func_quote_arg_result=$func_quote_portable_result + func_quote_arg_unquoted_result=$func_quote_portable_unquoted_result + ;; + *) + # Faster quote-for-eval for some shells. + func_quotefast_eval "$2" + func_quote_arg_result=$func_quotefast_eval_result + ;; + esac +} + + +# func_quote MODEs ARGs... +# ------------------------ +# Quote all ARGs to be evaled later and join them into single command. See +# func_quote_arg's description for more info. +func_quote () +{ + $debug_cmd + _G_func_quote_mode=$1 ; shift + func_quote_result= + while test 0 -lt $#; do + func_quote_arg "$_G_func_quote_mode" "$1" + if test -n "$func_quote_result"; then + func_append func_quote_result " $func_quote_arg_result" + else + func_append func_quote_result "$func_quote_arg_result" + fi + shift + done +} + + +# func_stripname PREFIX SUFFIX NAME +# --------------------------------- +# strip PREFIX and SUFFIX from NAME, and store in func_stripname_result. +# PREFIX and SUFFIX must not contain globbing or regex special +# characters, hashes, percent signs, but SUFFIX may contain a leading +# dot (in which case that matches only a dot). +if test yes = "$_G_HAVE_XSI_OPS"; then + eval 'func_stripname () + { + $debug_cmd + + # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are + # positional parameters, so assign one to ordinary variable first. + func_stripname_result=$3 + func_stripname_result=${func_stripname_result#"$1"} + func_stripname_result=${func_stripname_result%"$2"} + }' +else + func_stripname () + { + $debug_cmd + + case $2 in + .*) func_stripname_result=`$ECHO "$3" | $SED -e "s%^$1%%" -e "s%\\\\$2\$%%"`;; + *) func_stripname_result=`$ECHO "$3" | $SED -e "s%^$1%%" -e "s%$2\$%%"`;; + esac + } +fi + + +# func_show_eval CMD [FAIL_EXP] +# ----------------------------- +# Unless opt_quiet is true, then output CMD. Then, if opt_dryrun is +# not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP +# is given, then evaluate it. +func_show_eval () +{ + $debug_cmd + + _G_cmd=$1 + _G_fail_exp=${2-':'} + + func_quote_arg pretty,expand "$_G_cmd" + eval "func_notquiet $func_quote_arg_result" + + $opt_dry_run || { + eval "$_G_cmd" + _G_status=$? + if test 0 -ne "$_G_status"; then + eval "(exit $_G_status); $_G_fail_exp" + fi + } +} + + +# func_show_eval_locale CMD [FAIL_EXP] +# ------------------------------------ +# Unless opt_quiet is true, then output CMD. Then, if opt_dryrun is +# not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP +# is given, then evaluate it. Use the saved locale for evaluation. +func_show_eval_locale () +{ + $debug_cmd + + _G_cmd=$1 + _G_fail_exp=${2-':'} + + $opt_quiet || { + func_quote_arg expand,pretty "$_G_cmd" + eval "func_echo $func_quote_arg_result" + } + + $opt_dry_run || { + eval "$_G_user_locale + $_G_cmd" + _G_status=$? 
+ eval "$_G_safe_locale" + if test 0 -ne "$_G_status"; then + eval "(exit $_G_status); $_G_fail_exp" + fi + } +} + + +# func_tr_sh +# ---------- +# Turn $1 into a string suitable for a shell variable name. +# Result is stored in $func_tr_sh_result. All characters +# not in the set a-zA-Z0-9_ are replaced with '_'. Further, +# if $1 begins with a digit, a '_' is prepended as well. +func_tr_sh () +{ + $debug_cmd + + case $1 in + [0-9]* | *[!a-zA-Z0-9_]*) + func_tr_sh_result=`$ECHO "$1" | $SED -e 's/^\([0-9]\)/_\1/' -e 's/[^a-zA-Z0-9_]/_/g'` + ;; + * ) + func_tr_sh_result=$1 + ;; + esac +} + + +# func_verbose ARG... +# ------------------- +# Echo program name prefixed message in verbose mode only. +func_verbose () +{ + $debug_cmd + + $opt_verbose && func_echo "$*" + + : +} + + +# func_warn_and_continue ARG... +# ----------------------------- +# Echo program name prefixed warning message to standard error. +func_warn_and_continue () +{ + $debug_cmd + + $require_term_colors + + func_echo_infix_1 "${tc_red}warning$tc_reset" "$*" >&2 +} + + +# func_warning CATEGORY ARG... +# ---------------------------- +# Echo program name prefixed warning message to standard error. Warning +# messages can be filtered according to CATEGORY, where this function +# elides messages where CATEGORY is not listed in the global variable +# 'opt_warning_types'. +func_warning () +{ + $debug_cmd + + # CATEGORY must be in the warning_categories list! + case " $warning_categories " in + *" $1 "*) ;; + *) func_internal_error "invalid warning category '$1'" ;; + esac + + _G_category=$1 + shift + + case " $opt_warning_types " in + *" $_G_category "*) $warning_func ${1+"$@"} ;; + esac +} + + +# func_sort_ver VER1 VER2 +# ----------------------- +# 'sort -V' is not generally available. +# Note this deviates from the version comparison in automake +# in that it treats 1.5 < 1.5.0, and treats 1.4.4a < 1.4-p3a +# but this should suffice as we won't be specifying old +# version formats or redundant trailing .0 in bootstrap.conf. +# If we did want full compatibility then we should probably +# use m4_version_compare from autoconf. +func_sort_ver () +{ + $debug_cmd + + printf '%s\n%s\n' "$1" "$2" \ + | sort -t. -k 1,1n -k 2,2n -k 3,3n -k 4,4n -k 5,5n -k 6,6n -k 7,7n -k 8,8n -k 9,9n +} + +# func_lt_ver PREV CURR +# --------------------- +# Return true if PREV and CURR are in the correct order according to +# func_sort_ver, otherwise false. Use it like this: +# +# func_lt_ver "$prev_ver" "$proposed_ver" || func_fatal_error "..." +func_lt_ver () +{ + $debug_cmd + + test "x$1" = x`func_sort_ver "$1" "$2" | $SED 1q` +} + + +# Local variables: +# mode: shell-script +# sh-indentation: 2 +# eval: (add-hook 'before-save-hook 'time-stamp) +# time-stamp-pattern: "10/scriptversion=%:y-%02m-%02d.%02H; # UTC" +# time-stamp-time-zone: "UTC" +# End: +#! /bin/sh + +# A portable, pluggable option parser for Bourne shell. +# Written by Gary V. Vaughan, 2010 + +# This is free software. There is NO warranty; not even for +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# +# Copyright (C) 2010-2019, 2021 Bootstrap Authors +# +# This file is dual licensed under the terms of the MIT license +# , and GPL version 2 or later +# . You must apply one of +# these licenses when using or redistributing this software or any of +# the files within it. See the URLs above, or the file `LICENSE` +# included in the Bootstrap distribution for the full license texts. + +# Please report bugs or propose patches to: +# + +# Set a version string for this script. 
+scriptversion=2019-02-19.15; # UTC + + +## ------ ## +## Usage. ## +## ------ ## + +# This file is a library for parsing options in your shell scripts along +# with assorted other useful supporting features that you can make use +# of too. +# +# For the simplest scripts you might need only: +# +# #!/bin/sh +# . relative/path/to/funclib.sh +# . relative/path/to/options-parser +# scriptversion=1.0 +# func_options ${1+"$@"} +# eval set dummy "$func_options_result"; shift +# ...rest of your script... +# +# In order for the '--version' option to work, you will need to have a +# suitably formatted comment like the one at the top of this file +# starting with '# Written by ' and ending with '# Copyright'. +# +# For '-h' and '--help' to work, you will also need a one line +# description of your script's purpose in a comment directly above the +# '# Written by ' line, like the one at the top of this file. +# +# The default options also support '--debug', which will turn on shell +# execution tracing (see the comment above debug_cmd below for another +# use), and '--verbose' and the func_verbose function to allow your script +# to display verbose messages only when your user has specified +# '--verbose'. +# +# After sourcing this file, you can plug in processing for additional +# options by amending the variables from the 'Configuration' section +# below, and following the instructions in the 'Option parsing' +# section further down. + +## -------------- ## +## Configuration. ## +## -------------- ## + +# You should override these variables in your script after sourcing this +# file so that they reflect the customisations you have added to the +# option parser. + +# The usage line for option parsing errors and the start of '-h' and +# '--help' output messages. You can embed shell variables for delayed +# expansion at the time the message is displayed, but you will need to +# quote other shell meta-characters carefully to prevent them being +# expanded when the contents are evaled. +usage='$progpath [OPTION]...' + +# Short help message in response to '-h' and '--help'. Add to this or +# override it after sourcing this library to reflect the full set of +# options your script accepts. +usage_message="\ + --debug enable verbose shell tracing + -W, --warnings=CATEGORY + report the warnings falling in CATEGORY [all] + -v, --verbose verbosely report processing + --version print version information and exit + -h, --help print short or long help message and exit +" + +# Additional text appended to 'usage_message' in response to '--help'. +long_help_message=" +Warning categories include: + 'all' show all warnings + 'none' turn off all the warnings + 'error' warnings are treated as fatal errors" + +# Help message printed before fatal option parsing errors. +fatal_help="Try '\$progname --help' for more information." + + + +## ------------------------- ## +## Hook function management. ## +## ------------------------- ## + +# This section contains functions for adding, removing, and running hooks +# in the main code. A hook is just a list of function names that can be +# run in order later on. + +# func_hookable FUNC_NAME +# ----------------------- +# Declare that FUNC_NAME will run hooks added with +# 'func_add_hook FUNC_NAME ...'. +func_hookable () +{ + $debug_cmd + + func_append hookable_fns " $1" +} + + +# func_add_hook FUNC_NAME HOOK_FUNC +# --------------------------------- +# Request that FUNC_NAME call HOOK_FUNC before it returns. 
FUNC_NAME must +# first have been declared "hookable" by a call to 'func_hookable'. +func_add_hook () +{ + $debug_cmd + + case " $hookable_fns " in + *" $1 "*) ;; + *) func_fatal_error "'$1' does not accept hook functions." ;; + esac + + eval func_append ${1}_hooks '" $2"' +} + + +# func_remove_hook FUNC_NAME HOOK_FUNC +# ------------------------------------ +# Remove HOOK_FUNC from the list of hook functions to be called by +# FUNC_NAME. +func_remove_hook () +{ + $debug_cmd + + eval ${1}_hooks='`$ECHO "\$'$1'_hooks" |$SED "s| '$2'||"`' +} + + +# func_propagate_result FUNC_NAME_A FUNC_NAME_B +# --------------------------------------------- +# If the *_result variable of FUNC_NAME_A _is set_, assign its value to +# *_result variable of FUNC_NAME_B. +func_propagate_result () +{ + $debug_cmd + + func_propagate_result_result=: + if eval "test \"\${${1}_result+set}\" = set" + then + eval "${2}_result=\$${1}_result" + else + func_propagate_result_result=false + fi +} + + +# func_run_hooks FUNC_NAME [ARG]... +# --------------------------------- +# Run all hook functions registered to FUNC_NAME. +# It's assumed that the list of hook functions contains nothing more +# than a whitespace-delimited list of legal shell function names, and +# no effort is wasted trying to catch shell meta-characters or preserve +# whitespace. +func_run_hooks () +{ + $debug_cmd + + case " $hookable_fns " in + *" $1 "*) ;; + *) func_fatal_error "'$1' does not support hook functions." ;; + esac + + eval _G_hook_fns=\$$1_hooks; shift + + for _G_hook in $_G_hook_fns; do + func_unset "${_G_hook}_result" + eval $_G_hook '${1+"$@"}' + func_propagate_result $_G_hook func_run_hooks + if $func_propagate_result_result; then + eval set dummy "$func_run_hooks_result"; shift + fi + done +} + + + +## --------------- ## +## Option parsing. ## +## --------------- ## + +# In order to add your own option parsing hooks, you must accept the +# full positional parameter list from your hook function. You may remove +# or edit any options that you action, and then pass back the remaining +# unprocessed options in '_result', escaped +# suitably for 'eval'. +# +# The '_result' variable is automatically unset +# before your hook gets called; for best performance, only set the +# *_result variable when necessary (i.e. don't call the 'func_quote' +# function unnecessarily because it can be an expensive operation on some +# machines). +# +# Like this: +# +# my_options_prep () +# { +# $debug_cmd +# +# # Extend the existing usage message. +# usage_message=$usage_message' +# -s, --silent don'\''t print informational messages +# ' +# # No change in '$@' (ignored completely by this hook). Leave +# # my_options_prep_result variable intact. +# } +# func_add_hook func_options_prep my_options_prep +# +# +# my_silent_option () +# { +# $debug_cmd +# +# args_changed=false +# +# # Note that, for efficiency, we parse as many options as we can +# # recognise in a loop before passing the remainder back to the +# # caller on the first unrecognised argument we encounter. +# while test $# -gt 0; do +# opt=$1; shift +# case $opt in +# --silent|-s) opt_silent=: +# args_changed=: +# ;; +# # Separate non-argument short options: +# -s*) func_split_short_opt "$_G_opt" +# set dummy "$func_split_short_opt_name" \ +# "-$func_split_short_opt_arg" ${1+"$@"} +# shift +# args_changed=: +# ;; +# *) # Make sure the first unrecognised option "$_G_opt" +# # is added back to "$@" in case we need it later, +# # if $args_changed was set to 'true'. 
+# set dummy "$_G_opt" ${1+"$@"}; shift; break ;; +# esac +# done +# +# # Only call 'func_quote' here if we processed at least one argument. +# if $args_changed; then +# func_quote eval ${1+"$@"} +# my_silent_option_result=$func_quote_result +# fi +# } +# func_add_hook func_parse_options my_silent_option +# +# +# my_option_validation () +# { +# $debug_cmd +# +# $opt_silent && $opt_verbose && func_fatal_help "\ +# '--silent' and '--verbose' options are mutually exclusive." +# } +# func_add_hook func_validate_options my_option_validation +# +# You'll also need to manually amend $usage_message to reflect the extra +# options you parse. It's preferable to append if you can, so that +# multiple option parsing hooks can be added safely. + + +# func_options_finish [ARG]... +# ---------------------------- +# Finishing the option parse loop (call 'func_options' hooks ATM). +func_options_finish () +{ + $debug_cmd + + func_run_hooks func_options ${1+"$@"} + func_propagate_result func_run_hooks func_options_finish +} + + +# func_options [ARG]... +# --------------------- +# All the functions called inside func_options are hookable. See the +# individual implementations for details. +func_hookable func_options +func_options () +{ + $debug_cmd + + _G_options_quoted=false + + for my_func in options_prep parse_options validate_options options_finish + do + func_unset func_${my_func}_result + func_unset func_run_hooks_result + eval func_$my_func '${1+"$@"}' + func_propagate_result func_$my_func func_options + if $func_propagate_result_result; then + eval set dummy "$func_options_result"; shift + _G_options_quoted=: + fi + done + + $_G_options_quoted || { + # As we (func_options) are top-level options-parser function and + # nobody quoted "$@" for us yet, we need to do it explicitly for + # caller. + func_quote eval ${1+"$@"} + func_options_result=$func_quote_result + } +} + + +# func_options_prep [ARG]... +# -------------------------- +# All initialisations required before starting the option parse loop. +# Note that when calling hook functions, we pass through the list of +# positional parameters. If a hook function modifies that list, and +# needs to propagate that back to rest of this script, then the complete +# modified list must be put in 'func_run_hooks_result' before returning. +func_hookable func_options_prep +func_options_prep () +{ + $debug_cmd + + # Option defaults: + opt_verbose=false + opt_warning_types= + + func_run_hooks func_options_prep ${1+"$@"} + func_propagate_result func_run_hooks func_options_prep +} + + +# func_parse_options [ARG]... +# --------------------------- +# The main option parsing loop. +func_hookable func_parse_options +func_parse_options () +{ + $debug_cmd + + _G_parse_options_requote=false + # this just eases exit handling + while test $# -gt 0; do + # Defer to hook functions for initial option parsing, so they + # get priority in the event of reusing an option name. + func_run_hooks func_parse_options ${1+"$@"} + func_propagate_result func_run_hooks func_parse_options + if $func_propagate_result_result; then + eval set dummy "$func_parse_options_result"; shift + # Even though we may have changed "$@", we passed the "$@" array + # down into the hook and it quoted it for us (because we are in + # this if-branch). No need to quote it again. + _G_parse_options_requote=false + fi + + # Break out of the loop if we already parsed every option. 
+ test $# -gt 0 || break + + # We expect that one of the options parsed in this function matches + # and thus we remove _G_opt from "$@" and need to re-quote. + _G_match_parse_options=: + _G_opt=$1 + shift + case $_G_opt in + --debug|-x) debug_cmd='set -x' + func_echo "enabling shell trace mode" >&2 + $debug_cmd + ;; + + --no-warnings|--no-warning|--no-warn) + set dummy --warnings none ${1+"$@"} + shift + ;; + + --warnings|--warning|-W) + if test $# = 0 && func_missing_arg $_G_opt; then + _G_parse_options_requote=: + break + fi + case " $warning_categories $1" in + *" $1 "*) + # trailing space prevents matching last $1 above + func_append_uniq opt_warning_types " $1" + ;; + *all) + opt_warning_types=$warning_categories + ;; + *none) + opt_warning_types=none + warning_func=: + ;; + *error) + opt_warning_types=$warning_categories + warning_func=func_fatal_error + ;; + *) + func_fatal_error \ + "unsupported warning category: '$1'" + ;; + esac + shift + ;; + + --verbose|-v) opt_verbose=: ;; + --version) func_version ;; + -\?|-h) func_usage ;; + --help) func_help ;; + + # Separate optargs to long options (plugins may need this): + --*=*) func_split_equals "$_G_opt" + set dummy "$func_split_equals_lhs" \ + "$func_split_equals_rhs" ${1+"$@"} + shift + ;; + + # Separate optargs to short options: + -W*) + func_split_short_opt "$_G_opt" + set dummy "$func_split_short_opt_name" \ + "$func_split_short_opt_arg" ${1+"$@"} + shift + ;; + + # Separate non-argument short options: + -\?*|-h*|-v*|-x*) + func_split_short_opt "$_G_opt" + set dummy "$func_split_short_opt_name" \ + "-$func_split_short_opt_arg" ${1+"$@"} + shift + ;; + + --) _G_parse_options_requote=: ; break ;; + -*) func_fatal_help "unrecognised option: '$_G_opt'" ;; + *) set dummy "$_G_opt" ${1+"$@"}; shift + _G_match_parse_options=false + break + ;; + esac + + if $_G_match_parse_options; then + _G_parse_options_requote=: + fi + done + + if $_G_parse_options_requote; then + # save modified positional parameters for caller + func_quote eval ${1+"$@"} + func_parse_options_result=$func_quote_result + fi +} + + +# func_validate_options [ARG]... +# ------------------------------ +# Perform any sanity checks on option settings and/or unconsumed +# arguments. +func_hookable func_validate_options +func_validate_options () +{ + $debug_cmd + + # Display all warnings if -W was not given. + test -n "$opt_warning_types" || opt_warning_types=" $warning_categories" + + func_run_hooks func_validate_options ${1+"$@"} + func_propagate_result func_run_hooks func_validate_options + + # Bail if the options were screwed! + $exit_cmd $EXIT_FAILURE +} + + + +## ----------------- ## +## Helper functions. ## +## ----------------- ## + +# This section contains the helper functions used by the rest of the +# hookable option parser framework in ascii-betical order. + + +# func_fatal_help ARG... +# ---------------------- +# Echo program name prefixed message to standard error, followed by +# a help hint, and exit. +func_fatal_help () +{ + $debug_cmd + + eval \$ECHO \""Usage: $usage"\" + eval \$ECHO \""$fatal_help"\" + func_error ${1+"$@"} + exit $EXIT_FAILURE +} + + +# func_help +# --------- +# Echo long help message to standard output and exit. +func_help () +{ + $debug_cmd + + func_usage_message + $ECHO "$long_help_message" + exit 0 +} + + +# func_missing_arg ARGNAME +# ------------------------ +# Echo program name prefixed message to standard error and set global +# exit_cmd. +func_missing_arg () +{ + $debug_cmd + + func_error "Missing argument for '$1'." 
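+  # Record the failure in the global exit_cmd rather than exiting on the
+  # spot, so that the remaining options can still be parsed; the closing
+  # '$exit_cmd $EXIT_FAILURE' in func_validate_options then terminates
+  # the script, since exit_cmd is a no-op ':' unless an error has been
+  # recorded.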
+ exit_cmd=exit +} + + +# func_split_equals STRING +# ------------------------ +# Set func_split_equals_lhs and func_split_equals_rhs shell variables +# after splitting STRING at the '=' sign. +test -z "$_G_HAVE_XSI_OPS" \ + && (eval 'x=a/b/c; + test 5aa/bb/cc = "${#x}${x%%/*}${x%/*}${x#*/}${x##*/}"') 2>/dev/null \ + && _G_HAVE_XSI_OPS=yes + +if test yes = "$_G_HAVE_XSI_OPS" +then + # This is an XSI compatible shell, allowing a faster implementation... + eval 'func_split_equals () + { + $debug_cmd + + func_split_equals_lhs=${1%%=*} + func_split_equals_rhs=${1#*=} + if test "x$func_split_equals_lhs" = "x$1"; then + func_split_equals_rhs= + fi + }' +else + # ...otherwise fall back to using expr, which is often a shell builtin. + func_split_equals () + { + $debug_cmd + + func_split_equals_lhs=`expr "x$1" : 'x\([^=]*\)'` + func_split_equals_rhs= + test "x$func_split_equals_lhs=" = "x$1" \ + || func_split_equals_rhs=`expr "x$1" : 'x[^=]*=\(.*\)$'` + } +fi #func_split_equals + + +# func_split_short_opt SHORTOPT +# ----------------------------- +# Set func_split_short_opt_name and func_split_short_opt_arg shell +# variables after splitting SHORTOPT after the 2nd character. +if test yes = "$_G_HAVE_XSI_OPS" +then + # This is an XSI compatible shell, allowing a faster implementation... + eval 'func_split_short_opt () + { + $debug_cmd + + func_split_short_opt_arg=${1#??} + func_split_short_opt_name=${1%"$func_split_short_opt_arg"} + }' +else + # ...otherwise fall back to using expr, which is often a shell builtin. + func_split_short_opt () + { + $debug_cmd + + func_split_short_opt_name=`expr "x$1" : 'x\(-.\)'` + func_split_short_opt_arg=`expr "x$1" : 'x-.\(.*\)$'` + } +fi #func_split_short_opt + + +# func_usage +# ---------- +# Echo short help message to standard output and exit. +func_usage () +{ + $debug_cmd + + func_usage_message + $ECHO "Run '$progname --help |${PAGER-more}' for full usage" + exit 0 +} + + +# func_usage_message +# ------------------ +# Echo short help message to standard output. +func_usage_message () +{ + $debug_cmd + + eval \$ECHO \""Usage: $usage"\" + echo + $SED -n 's|^# || + /^Written by/{ + x;p;x + } + h + /^Written by/q' < "$progpath" + echo + eval \$ECHO \""$usage_message"\" +} + + +# func_version +# ------------ +# Echo version message to standard output and exit. +# The version message is extracted from the calling file's header +# comments, with leading '# ' stripped: +# 1. First display the progname and version +# 2. Followed by the header comment line matching /^# Written by / +# 3. Then a blank line followed by the first following line matching +# /^# Copyright / +# 4. Immediately followed by any lines between the previous matches, +# except lines preceding the intervening completely blank line. +# For example, see the header comments of this file. +func_version () +{ + $debug_cmd + + printf '%s\n' "$progname $scriptversion" + $SED -n ' + /^# Written by /!b + s|^# ||; p; n + + :fwd2blnk + /./ { + n + b fwd2blnk + } + p; n + + :holdwrnt + s|^# || + s|^# *$|| + /^Copyright /!{ + /./H + n + b holdwrnt + } + + s|\((C)\)[ 0-9,-]*[ ,-]\([1-9][0-9]* \)|\1 \2| + G + s|\(\n\)\n*|\1|g + p; q' < "$progpath" + + exit $? +} + + +# Local variables: +# mode: shell-script +# sh-indentation: 2 +# eval: (add-hook 'before-save-hook 'time-stamp) +# time-stamp-pattern: "30/scriptversion=%:y-%02m-%02d.%02H; # UTC" +# time-stamp-time-zone: "UTC" +# End: + +# Set a version string. +scriptversion='(GNU libtool) 2.4.7' + + +# func_echo ARG... 
+# ---------------- +# Libtool also displays the current mode in messages, so override +# funclib.sh func_echo with this custom definition. +func_echo () +{ + $debug_cmd + + _G_message=$* + + func_echo_IFS=$IFS + IFS=$nl + for _G_line in $_G_message; do + IFS=$func_echo_IFS + $ECHO "$progname${opt_mode+: $opt_mode}: $_G_line" + done + IFS=$func_echo_IFS +} + + +# func_warning ARG... +# ------------------- +# Libtool warnings are not categorized, so override funclib.sh +# func_warning with this simpler definition. +func_warning () +{ + $debug_cmd + + $warning_func ${1+"$@"} +} + + +## ---------------- ## +## Options parsing. ## +## ---------------- ## + +# Hook in the functions to make sure our own options are parsed during +# the option parsing loop. + +usage='$progpath [OPTION]... [MODE-ARG]...' + +# Short help message in response to '-h'. +usage_message="Options: + --config show all configuration variables + --debug enable verbose shell tracing + -n, --dry-run display commands without modifying any files + --features display basic configuration information and exit + --mode=MODE use operation mode MODE + --no-warnings equivalent to '-Wnone' + --preserve-dup-deps don't remove duplicate dependency libraries + --quiet, --silent don't print informational messages + --tag=TAG use configuration variables from tag TAG + -v, --verbose print more informational messages than default + --version print version information + -W, --warnings=CATEGORY report the warnings falling in CATEGORY [all] + -h, --help, --help-all print short, long, or detailed help message +" + +# Additional text appended to 'usage_message' in response to '--help'. +func_help () +{ + $debug_cmd + + func_usage_message + $ECHO "$long_help_message + +MODE must be one of the following: + + clean remove files from the build directory + compile compile a source file into a libtool object + execute automatically set library path, then run a program + finish complete the installation of libtool libraries + install install libraries or executables + link create a library or an executable + uninstall remove libraries from an installed directory + +MODE-ARGS vary depending on the MODE. When passed as first option, +'--mode=MODE' may be abbreviated as 'MODE' or a unique abbreviation of that. +Try '$progname --help --mode=MODE' for a more detailed description of MODE. + +When reporting a bug, please describe a test case to reproduce it and +include the following information: + + host-triplet: $host + shell: $SHELL + compiler: $LTCC + compiler flags: $LTCFLAGS + linker: $LD (gnu? $with_gnu_ld) + version: $progname (GNU libtool) 2.4.7 + automake: `($AUTOMAKE --version) 2>/dev/null |$SED 1q` + autoconf: `($AUTOCONF --version) 2>/dev/null |$SED 1q` + +Report bugs to . +GNU libtool home page: . +General help using GNU software: ." + exit 0 +} + + +# func_lo2o OBJECT-NAME +# --------------------- +# Transform OBJECT-NAME from a '.lo' suffix to the platform specific +# object suffix. + +lo2o=s/\\.lo\$/.$objext/ +o2lo=s/\\.$objext\$/.lo/ + +if test yes = "$_G_HAVE_XSI_OPS"; then + eval 'func_lo2o () + { + case $1 in + *.lo) func_lo2o_result=${1%.lo}.$objext ;; + * ) func_lo2o_result=$1 ;; + esac + }' + + # func_xform LIBOBJ-OR-SOURCE + # --------------------------- + # Transform LIBOBJ-OR-SOURCE from a '.o' or '.c' (or otherwise) + # suffix to a '.lo' libtool-object suffix. + eval 'func_xform () + { + func_xform_result=${1%.*}.lo + }' +else + # ...otherwise fall back to using sed. 
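+  # For illustration (assuming the usual objext=o), both the XSI and the
+  # sed implementations perform the same rewrites:
+  #
+  #   func_lo2o  foo.lo     =>  func_lo2o_result=foo.o
+  #   func_lo2o  bar.o      =>  func_lo2o_result=bar.o    (no .lo suffix)
+  #   func_xform foo.c      =>  func_xform_result=foo.lo
+  #   func_xform sub/bar.cc =>  func_xform_result=sub/bar.lo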
+ func_lo2o () + { + func_lo2o_result=`$ECHO "$1" | $SED "$lo2o"` + } + + func_xform () + { + func_xform_result=`$ECHO "$1" | $SED 's|\.[^.]*$|.lo|'` + } +fi + + +# func_fatal_configuration ARG... +# ------------------------------- +# Echo program name prefixed message to standard error, followed by +# a configuration failure hint, and exit. +func_fatal_configuration () +{ + func_fatal_error ${1+"$@"} \ + "See the $PACKAGE documentation for more information." \ + "Fatal configuration error." +} + + +# func_config +# ----------- +# Display the configuration for all the tags in this script. +func_config () +{ + re_begincf='^# ### BEGIN LIBTOOL' + re_endcf='^# ### END LIBTOOL' + + # Default configuration. + $SED "1,/$re_begincf CONFIG/d;/$re_endcf CONFIG/,\$d" < "$progpath" + + # Now print the configurations for the tags. + for tagname in $taglist; do + $SED -n "/$re_begincf TAG CONFIG: $tagname\$/,/$re_endcf TAG CONFIG: $tagname\$/p" < "$progpath" + done + + exit $? +} + + +# func_features +# ------------- +# Display the features supported by this script. +func_features () +{ + echo "host: $host" + if test yes = "$build_libtool_libs"; then + echo "enable shared libraries" + else + echo "disable shared libraries" + fi + if test yes = "$build_old_libs"; then + echo "enable static libraries" + else + echo "disable static libraries" + fi + + exit $? +} + + +# func_enable_tag TAGNAME +# ----------------------- +# Verify that TAGNAME is valid, and either flag an error and exit, or +# enable the TAGNAME tag. We also add TAGNAME to the global $taglist +# variable here. +func_enable_tag () +{ + # Global variable: + tagname=$1 + + re_begincf="^# ### BEGIN LIBTOOL TAG CONFIG: $tagname\$" + re_endcf="^# ### END LIBTOOL TAG CONFIG: $tagname\$" + sed_extractcf=/$re_begincf/,/$re_endcf/p + + # Validate tagname. + case $tagname in + *[!-_A-Za-z0-9,/]*) + func_fatal_error "invalid tag name: $tagname" + ;; + esac + + # Don't test for the "default" C tag, as we know it's + # there but not specially marked. + case $tagname in + CC) ;; + *) + if $GREP "$re_begincf" "$progpath" >/dev/null 2>&1; then + taglist="$taglist $tagname" + + # Evaluate the configuration. Be careful to quote the path + # and the sed script, to avoid splitting on whitespace, but + # also don't use non-portable quotes within backquotes within + # quotes we have to do it in 2 steps: + extractedcf=`$SED -n -e "$sed_extractcf" < "$progpath"` + eval "$extractedcf" + else + func_error "ignoring unknown tag $tagname" + fi + ;; + esac +} + + +# func_check_version_match +# ------------------------ +# Ensure that we are using m4 macros, and libtool script from the same +# release of libtool. +func_check_version_match () +{ + if test "$package_revision" != "$macro_revision"; then + if test "$VERSION" != "$macro_version"; then + if test -z "$macro_version"; then + cat >&2 <<_LT_EOF +$progname: Version mismatch error. This is $PACKAGE $VERSION, but the +$progname: definition of this LT_INIT comes from an older release. +$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION +$progname: and run autoconf again. +_LT_EOF + else + cat >&2 <<_LT_EOF +$progname: Version mismatch error. This is $PACKAGE $VERSION, but the +$progname: definition of this LT_INIT comes from $PACKAGE $macro_version. +$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION +$progname: and run autoconf again. +_LT_EOF + fi + else + cat >&2 <<_LT_EOF +$progname: Version mismatch error. 
This is $PACKAGE $VERSION, revision $package_revision, +$progname: but the definition of this LT_INIT comes from revision $macro_revision. +$progname: You should recreate aclocal.m4 with macros from revision $package_revision +$progname: of $PACKAGE $VERSION and run autoconf again. +_LT_EOF + fi + + exit $EXIT_MISMATCH + fi +} + + +# libtool_options_prep [ARG]... +# ----------------------------- +# Preparation for options parsed by libtool. +libtool_options_prep () +{ + $debug_mode + + # Option defaults: + opt_config=false + opt_dlopen= + opt_dry_run=false + opt_help=false + opt_mode= + opt_preserve_dup_deps=false + opt_quiet=false + + nonopt= + preserve_args= + + _G_rc_lt_options_prep=: + + # Shorthand for --mode=foo, only valid as the first argument + case $1 in + clean|clea|cle|cl) + shift; set dummy --mode clean ${1+"$@"}; shift + ;; + compile|compil|compi|comp|com|co|c) + shift; set dummy --mode compile ${1+"$@"}; shift + ;; + execute|execut|execu|exec|exe|ex|e) + shift; set dummy --mode execute ${1+"$@"}; shift + ;; + finish|finis|fini|fin|fi|f) + shift; set dummy --mode finish ${1+"$@"}; shift + ;; + install|instal|insta|inst|ins|in|i) + shift; set dummy --mode install ${1+"$@"}; shift + ;; + link|lin|li|l) + shift; set dummy --mode link ${1+"$@"}; shift + ;; + uninstall|uninstal|uninsta|uninst|unins|unin|uni|un|u) + shift; set dummy --mode uninstall ${1+"$@"}; shift + ;; + *) + _G_rc_lt_options_prep=false + ;; + esac + + if $_G_rc_lt_options_prep; then + # Pass back the list of options. + func_quote eval ${1+"$@"} + libtool_options_prep_result=$func_quote_result + fi +} +func_add_hook func_options_prep libtool_options_prep + + +# libtool_parse_options [ARG]... +# --------------------------------- +# Provide handling for libtool specific options. +libtool_parse_options () +{ + $debug_cmd + + _G_rc_lt_parse_options=false + + # Perform our own loop to consume as many options as possible in + # each iteration. 
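+  # Illustrative sketch: given
+  #
+  #   --quiet --tag CC --mode compile gcc -c foo.c
+  #
+  # the loop below consumes '--quiet', '--tag CC' and '--mode compile',
+  # then hits 'gcc', pushes it back onto "$@" and breaks; because options
+  # were consumed, the remaining 'gcc -c foo.c' is re-quoted into
+  # libtool_parse_options_result for the generic parser in funclib.sh.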
+ while test $# -gt 0; do + _G_match_lt_parse_options=: + _G_opt=$1 + shift + case $_G_opt in + --dry-run|--dryrun|-n) + opt_dry_run=: + ;; + + --config) func_config ;; + + --dlopen|-dlopen) + opt_dlopen="${opt_dlopen+$opt_dlopen +}$1" + shift + ;; + + --preserve-dup-deps) + opt_preserve_dup_deps=: ;; + + --features) func_features ;; + + --finish) set dummy --mode finish ${1+"$@"}; shift ;; + + --help) opt_help=: ;; + + --help-all) opt_help=': help-all' ;; + + --mode) test $# = 0 && func_missing_arg $_G_opt && break + opt_mode=$1 + case $1 in + # Valid mode arguments: + clean|compile|execute|finish|install|link|relink|uninstall) ;; + + # Catch anything else as an error + *) func_error "invalid argument for $_G_opt" + exit_cmd=exit + break + ;; + esac + shift + ;; + + --no-silent|--no-quiet) + opt_quiet=false + func_append preserve_args " $_G_opt" + ;; + + --no-warnings|--no-warning|--no-warn) + opt_warning=false + func_append preserve_args " $_G_opt" + ;; + + --no-verbose) + opt_verbose=false + func_append preserve_args " $_G_opt" + ;; + + --silent|--quiet) + opt_quiet=: + opt_verbose=false + func_append preserve_args " $_G_opt" + ;; + + --tag) test $# = 0 && func_missing_arg $_G_opt && break + opt_tag=$1 + func_append preserve_args " $_G_opt $1" + func_enable_tag "$1" + shift + ;; + + --verbose|-v) opt_quiet=false + opt_verbose=: + func_append preserve_args " $_G_opt" + ;; + + # An option not handled by this hook function: + *) set dummy "$_G_opt" ${1+"$@"} ; shift + _G_match_lt_parse_options=false + break + ;; + esac + $_G_match_lt_parse_options && _G_rc_lt_parse_options=: + done + + if $_G_rc_lt_parse_options; then + # save modified positional parameters for caller + func_quote eval ${1+"$@"} + libtool_parse_options_result=$func_quote_result + fi +} +func_add_hook func_parse_options libtool_parse_options + + + +# libtool_validate_options [ARG]... +# --------------------------------- +# Perform any sanity checks on option settings and/or unconsumed +# arguments. +libtool_validate_options () +{ + # save first non-option argument + if test 0 -lt $#; then + nonopt=$1 + shift + fi + + # preserve --debug + test : = "$debug_cmd" || func_append preserve_args " --debug" + + case $host in + # Solaris2 added to fix http://debbugs.gnu.org/cgi/bugreport.cgi?bug=16452 + # see also: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=59788 + *cygwin* | *mingw* | *pw32* | *cegcc* | *solaris2* | *os2*) + # don't eliminate duplications in $postdeps and $predeps + opt_duplicate_compiler_generated_deps=: + ;; + *) + opt_duplicate_compiler_generated_deps=$opt_preserve_dup_deps + ;; + esac + + $opt_help || { + # Sanity checks first: + func_check_version_match + + test yes != "$build_libtool_libs" \ + && test yes != "$build_old_libs" \ + && func_fatal_configuration "not configured to build any kind of library" + + # Darwin sucks + eval std_shrext=\"$shrext_cmds\" + + # Only execute mode is allowed to have -dlopen flags. + if test -n "$opt_dlopen" && test execute != "$opt_mode"; then + func_error "unrecognized option '-dlopen'" + $ECHO "$help" 1>&2 + exit $EXIT_FAILURE + fi + + # Change the help message to a mode-specific one. + generic_help=$help + help="Try '$progname --help --mode=$opt_mode' for more information." + } + + # Pass back the unparsed argument list + func_quote eval ${1+"$@"} + libtool_validate_options_result=$func_quote_result +} +func_add_hook func_validate_options libtool_validate_options + + +# Process options as early as possible so that --help and --version +# can return quickly. 
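+# The call below drives the hookable parser defined earlier: func_options
+# runs func_options_prep, func_parse_options, func_validate_options and
+# func_options_finish in turn, each of which runs the libtool hooks
+# registered above with func_add_hook.  A rough sketch of the net effect
+# for a typical invocation:
+#
+#   libtool --mode=compile gcc -c foo.c
+#     => opt_mode=compile, nonopt=gcc (peeled off by
+#        libtool_validate_options), and the remaining '-c foo.c',
+#        suitably quoted, handed back in $func_options_result.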
+func_options ${1+"$@"} +eval set dummy "$func_options_result"; shift + + + +## ----------- ## +## Main. ## +## ----------- ## + +magic='%%%MAGIC variable%%%' +magic_exe='%%%MAGIC EXE variable%%%' + +# Global variables. +extracted_archives= +extracted_serial=0 + +# If this variable is set in any of the actions, the command in it +# will be execed at the end. This prevents here-documents from being +# left over by shells. +exec_cmd= + + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +$1 +_LTECHO_EOF' +} + +# func_generated_by_libtool +# True iff stdin has been generated by Libtool. This function is only +# a basic sanity check; it will hardly flush out determined imposters. +func_generated_by_libtool_p () +{ + $GREP "^# Generated by .*$PACKAGE" > /dev/null 2>&1 +} + +# func_lalib_p file +# True iff FILE is a libtool '.la' library or '.lo' object file. +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_lalib_p () +{ + test -f "$1" && + $SED -e 4q "$1" 2>/dev/null | func_generated_by_libtool_p +} + +# func_lalib_unsafe_p file +# True iff FILE is a libtool '.la' library or '.lo' object file. +# This function implements the same check as func_lalib_p without +# resorting to external programs. To this end, it redirects stdin and +# closes it afterwards, without saving the original file descriptor. +# As a safety measure, use it only where a negative result would be +# fatal anyway. Works if 'file' does not exist. +func_lalib_unsafe_p () +{ + lalib_p=no + if test -f "$1" && test -r "$1" && exec 5<&0 <"$1"; then + for lalib_p_l in 1 2 3 4 + do + read lalib_p_line + case $lalib_p_line in + \#\ Generated\ by\ *$PACKAGE* ) lalib_p=yes; break;; + esac + done + exec 0<&5 5<&- + fi + test yes = "$lalib_p" +} + +# func_ltwrapper_script_p file +# True iff FILE is a libtool wrapper script +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_script_p () +{ + test -f "$1" && + $lt_truncate_bin < "$1" 2>/dev/null | func_generated_by_libtool_p +} + +# func_ltwrapper_executable_p file +# True iff FILE is a libtool wrapper executable +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_executable_p () +{ + func_ltwrapper_exec_suffix= + case $1 in + *.exe) ;; + *) func_ltwrapper_exec_suffix=.exe ;; + esac + $GREP "$magic_exe" "$1$func_ltwrapper_exec_suffix" >/dev/null 2>&1 +} + +# func_ltwrapper_scriptname file +# Assumes file is an ltwrapper_executable +# uses $file to determine the appropriate filename for a +# temporary ltwrapper_script. +func_ltwrapper_scriptname () +{ + func_dirname_and_basename "$1" "" "." + func_stripname '' '.exe' "$func_basename_result" + func_ltwrapper_scriptname_result=$func_dirname_result/$objdir/${func_stripname_result}_ltshwrapper +} + +# func_ltwrapper_p file +# True iff FILE is a libtool wrapper script or wrapper executable +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_p () +{ + func_ltwrapper_script_p "$1" || func_ltwrapper_executable_p "$1" +} + + +# func_execute_cmds commands fail_cmd +# Execute tilde-delimited COMMANDS. +# If FAIL_CMD is given, eval that upon failure. +# FAIL_CMD may read-access the current command in variable CMD! 
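+# A hypothetical usage sketch (the variable names are illustrative):
+#
+#   finish_cmds='ldconfig -n $libdir~chmod 644 $libdir/$oldlib'
+#   func_execute_cmds "$finish_cmds" 'func_error "command failed: $cmd"'
+#
+# splits the first argument on '~', evals each command so the embedded
+# variables expand, and evaluates the FAIL_CMD expression if one fails.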
+func_execute_cmds () +{ + $debug_cmd + + save_ifs=$IFS; IFS='~' + for cmd in $1; do + IFS=$sp$nl + eval cmd=\"$cmd\" + IFS=$save_ifs + func_show_eval "$cmd" "${2-:}" + done + IFS=$save_ifs +} + + +# func_source file +# Source FILE, adding directory component if necessary. +# Note that it is not necessary on cygwin/mingw to append a dot to +# FILE even if both FILE and FILE.exe exist: automatic-append-.exe +# behavior happens only for exec(3), not for open(2)! Also, sourcing +# 'FILE.' does not work on cygwin managed mounts. +func_source () +{ + $debug_cmd + + case $1 in + */* | *\\*) . "$1" ;; + *) . "./$1" ;; + esac +} + + +# func_resolve_sysroot PATH +# Replace a leading = in PATH with a sysroot. Store the result into +# func_resolve_sysroot_result +func_resolve_sysroot () +{ + func_resolve_sysroot_result=$1 + case $func_resolve_sysroot_result in + =*) + func_stripname '=' '' "$func_resolve_sysroot_result" + func_resolve_sysroot_result=$lt_sysroot$func_stripname_result + ;; + esac +} + +# func_replace_sysroot PATH +# If PATH begins with the sysroot, replace it with = and +# store the result into func_replace_sysroot_result. +func_replace_sysroot () +{ + case $lt_sysroot:$1 in + ?*:"$lt_sysroot"*) + func_stripname "$lt_sysroot" '' "$1" + func_replace_sysroot_result='='$func_stripname_result + ;; + *) + # Including no sysroot. + func_replace_sysroot_result=$1 + ;; + esac +} + +# func_infer_tag arg +# Infer tagged configuration to use if any are available and +# if one wasn't chosen via the "--tag" command line option. +# Only attempt this if the compiler in the base compile +# command doesn't match the default compiler. +# arg is usually of the form 'gcc ...' +func_infer_tag () +{ + $debug_cmd + + if test -n "$available_tags" && test -z "$tagname"; then + CC_quoted= + for arg in $CC; do + func_append_quoted CC_quoted "$arg" + done + CC_expanded=`func_echo_all $CC` + CC_quoted_expanded=`func_echo_all $CC_quoted` + case $@ in + # Blanks in the command may have been stripped by the calling shell, + # but not from the CC environment variable when configure was run. + " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \ + " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*) ;; + # Blanks at the start of $base_compile will cause this to fail + # if we don't check for them as well. + *) + for z in $available_tags; do + if $GREP "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then + # Evaluate the configuration. + eval "`$SED -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`" + CC_quoted= + for arg in $CC; do + # Double-quote args containing other shell metacharacters. + func_append_quoted CC_quoted "$arg" + done + CC_expanded=`func_echo_all $CC` + CC_quoted_expanded=`func_echo_all $CC_quoted` + case "$@ " in + " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \ + " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*) + # The compiler in the base compile command matches + # the one in the tagged configuration. + # Assume this is the tagged configuration we want. + tagname=$z + break + ;; + esac + fi + done + # If $tagname still isn't set, then no tagged configuration + # was found and let the user know that the "--tag" command + # line option must be used. 
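+      # Reaching this point with an empty $tagname means none of the
+      # tagged configurations in $available_tags recorded a compiler
+      # matching this compile command (a g++ command line, for instance,
+      # would normally have matched a CXX tag above), so the user has to
+      # disambiguate with an explicit --tag.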
+ if test -z "$tagname"; then + func_echo "unable to infer tagged configuration" + func_fatal_error "specify a tag with '--tag'" +# else +# func_verbose "using $tagname tagged configuration" + fi + ;; + esac + fi +} + + + +# func_write_libtool_object output_name pic_name nonpic_name +# Create a libtool object file (analogous to a ".la" file), +# but don't create it if we're doing a dry run. +func_write_libtool_object () +{ + write_libobj=$1 + if test yes = "$build_libtool_libs"; then + write_lobj=\'$2\' + else + write_lobj=none + fi + + if test yes = "$build_old_libs"; then + write_oldobj=\'$3\' + else + write_oldobj=none + fi + + $opt_dry_run || { + cat >${write_libobj}T </dev/null` + if test "$?" -eq 0 && test -n "$func_convert_core_file_wine_to_w32_tmp"; then + func_convert_core_file_wine_to_w32_result=`$ECHO "$func_convert_core_file_wine_to_w32_tmp" | + $SED -e "$sed_naive_backslashify"` + else + func_convert_core_file_wine_to_w32_result= + fi + fi +} +# end: func_convert_core_file_wine_to_w32 + + +# func_convert_core_path_wine_to_w32 ARG +# Helper function used by path conversion functions when $build is *nix, and +# $host is mingw, cygwin, or some other w32 environment. Relies on a correctly +# configured wine environment available, with the winepath program in $build's +# $PATH. Assumes ARG has no leading or trailing path separator characters. +# +# ARG is path to be converted from $build format to win32. +# Result is available in $func_convert_core_path_wine_to_w32_result. +# Unconvertible file (directory) names in ARG are skipped; if no directory names +# are convertible, then the result may be empty. +func_convert_core_path_wine_to_w32 () +{ + $debug_cmd + + # unfortunately, winepath doesn't convert paths, only file names + func_convert_core_path_wine_to_w32_result= + if test -n "$1"; then + oldIFS=$IFS + IFS=: + for func_convert_core_path_wine_to_w32_f in $1; do + IFS=$oldIFS + func_convert_core_file_wine_to_w32 "$func_convert_core_path_wine_to_w32_f" + if test -n "$func_convert_core_file_wine_to_w32_result"; then + if test -z "$func_convert_core_path_wine_to_w32_result"; then + func_convert_core_path_wine_to_w32_result=$func_convert_core_file_wine_to_w32_result + else + func_append func_convert_core_path_wine_to_w32_result ";$func_convert_core_file_wine_to_w32_result" + fi + fi + done + IFS=$oldIFS + fi +} +# end: func_convert_core_path_wine_to_w32 + + +# func_cygpath ARGS... +# Wrapper around calling the cygpath program via LT_CYGPATH. This is used when +# when (1) $build is *nix and Cygwin is hosted via a wine environment; or (2) +# $build is MSYS and $host is Cygwin, or (3) $build is Cygwin. In case (1) or +# (2), returns the Cygwin file name or path in func_cygpath_result (input +# file name or path is assumed to be in w32 format, as previously converted +# from $build's *nix or MSYS format). In case (3), returns the w32 file name +# or path in func_cygpath_result (input file name or path is assumed to be in +# Cygwin format). Returns an empty string on error. +# +# ARGS are passed to cygpath, with the last one being the file name or path to +# be converted. +# +# Specify the absolute *nix (or w32) name to cygpath in the LT_CYGPATH +# environment variable; do not put it in $PATH. +func_cygpath () +{ + $debug_cmd + + if test -n "$LT_CYGPATH" && test -f "$LT_CYGPATH"; then + func_cygpath_result=`$LT_CYGPATH "$@" 2>/dev/null` + if test "$?" 
-ne 0; then + # on failure, ensure result is empty + func_cygpath_result= + fi + else + func_cygpath_result= + func_error "LT_CYGPATH is empty or specifies non-existent file: '$LT_CYGPATH'" + fi +} +#end: func_cygpath + + +# func_convert_core_msys_to_w32 ARG +# Convert file name or path ARG from MSYS format to w32 format. Return +# result in func_convert_core_msys_to_w32_result. +func_convert_core_msys_to_w32 () +{ + $debug_cmd + + # awkward: cmd appends spaces to result + func_convert_core_msys_to_w32_result=`( cmd //c echo "$1" ) 2>/dev/null | + $SED -e 's/[ ]*$//' -e "$sed_naive_backslashify"` +} +#end: func_convert_core_msys_to_w32 + + +# func_convert_file_check ARG1 ARG2 +# Verify that ARG1 (a file name in $build format) was converted to $host +# format in ARG2. Otherwise, emit an error message, but continue (resetting +# func_to_host_file_result to ARG1). +func_convert_file_check () +{ + $debug_cmd + + if test -z "$2" && test -n "$1"; then + func_error "Could not determine host file name corresponding to" + func_error " '$1'" + func_error "Continuing, but uninstalled executables may not work." + # Fallback: + func_to_host_file_result=$1 + fi +} +# end func_convert_file_check + + +# func_convert_path_check FROM_PATHSEP TO_PATHSEP FROM_PATH TO_PATH +# Verify that FROM_PATH (a path in $build format) was converted to $host +# format in TO_PATH. Otherwise, emit an error message, but continue, resetting +# func_to_host_file_result to a simplistic fallback value (see below). +func_convert_path_check () +{ + $debug_cmd + + if test -z "$4" && test -n "$3"; then + func_error "Could not determine the host path corresponding to" + func_error " '$3'" + func_error "Continuing, but uninstalled executables may not work." + # Fallback. This is a deliberately simplistic "conversion" and + # should not be "improved". See libtool.info. + if test "x$1" != "x$2"; then + lt_replace_pathsep_chars="s|$1|$2|g" + func_to_host_path_result=`echo "$3" | + $SED -e "$lt_replace_pathsep_chars"` + else + func_to_host_path_result=$3 + fi + fi +} +# end func_convert_path_check + + +# func_convert_path_front_back_pathsep FRONTPAT BACKPAT REPL ORIG +# Modifies func_to_host_path_result by prepending REPL if ORIG matches FRONTPAT +# and appending REPL if ORIG matches BACKPAT. +func_convert_path_front_back_pathsep () +{ + $debug_cmd + + case $4 in + $1 ) func_to_host_path_result=$3$func_to_host_path_result + ;; + esac + case $4 in + $2 ) func_append func_to_host_path_result "$3" + ;; + esac +} +# end func_convert_path_front_back_pathsep + + +################################################## +# $build to $host FILE NAME CONVERSION FUNCTIONS # +################################################## +# invoked via '$to_host_file_cmd ARG' +# +# In each case, ARG is the path to be converted from $build to $host format. +# Result will be available in $func_to_host_file_result. + + +# func_to_host_file ARG +# Converts the file name ARG from $build format to $host format. Return result +# in func_to_host_file_result. +func_to_host_file () +{ + $debug_cmd + + $to_host_file_cmd "$1" +} +# end func_to_host_file + + +# func_to_tool_file ARG LAZY +# converts the file name ARG from $build format to toolchain format. Return +# result in func_to_tool_file_result. If the conversion in use is listed +# in (the comma separated) LAZY, no conversion takes place. 
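+# Illustrative sketch: func_mode_compile below calls
+#
+#   func_to_tool_file "$srcfile" func_convert_file_msys_to_w32
+#
+# i.e. it declares the MSYS->w32 conversion "lazy", so when
+# $to_tool_file_cmd is func_convert_file_msys_to_w32 the file name is
+# passed through untouched; any other converter is invoked and its
+# func_to_host_file_result copied into func_to_tool_file_result.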
+func_to_tool_file () +{ + $debug_cmd + + case ,$2, in + *,"$to_tool_file_cmd",*) + func_to_tool_file_result=$1 + ;; + *) + $to_tool_file_cmd "$1" + func_to_tool_file_result=$func_to_host_file_result + ;; + esac +} +# end func_to_tool_file + + +# func_convert_file_noop ARG +# Copy ARG to func_to_host_file_result. +func_convert_file_noop () +{ + func_to_host_file_result=$1 +} +# end func_convert_file_noop + + +# func_convert_file_msys_to_w32 ARG +# Convert file name ARG from (mingw) MSYS to (mingw) w32 format; automatic +# conversion to w32 is not available inside the cwrapper. Returns result in +# func_to_host_file_result. +func_convert_file_msys_to_w32 () +{ + $debug_cmd + + func_to_host_file_result=$1 + if test -n "$1"; then + func_convert_core_msys_to_w32 "$1" + func_to_host_file_result=$func_convert_core_msys_to_w32_result + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_msys_to_w32 + + +# func_convert_file_cygwin_to_w32 ARG +# Convert file name ARG from Cygwin to w32 format. Returns result in +# func_to_host_file_result. +func_convert_file_cygwin_to_w32 () +{ + $debug_cmd + + func_to_host_file_result=$1 + if test -n "$1"; then + # because $build is cygwin, we call "the" cygpath in $PATH; no need to use + # LT_CYGPATH in this case. + func_to_host_file_result=`cygpath -m "$1"` + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_cygwin_to_w32 + + +# func_convert_file_nix_to_w32 ARG +# Convert file name ARG from *nix to w32 format. Requires a wine environment +# and a working winepath. Returns result in func_to_host_file_result. +func_convert_file_nix_to_w32 () +{ + $debug_cmd + + func_to_host_file_result=$1 + if test -n "$1"; then + func_convert_core_file_wine_to_w32 "$1" + func_to_host_file_result=$func_convert_core_file_wine_to_w32_result + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_nix_to_w32 + + +# func_convert_file_msys_to_cygwin ARG +# Convert file name ARG from MSYS to Cygwin format. Requires LT_CYGPATH set. +# Returns result in func_to_host_file_result. +func_convert_file_msys_to_cygwin () +{ + $debug_cmd + + func_to_host_file_result=$1 + if test -n "$1"; then + func_convert_core_msys_to_w32 "$1" + func_cygpath -u "$func_convert_core_msys_to_w32_result" + func_to_host_file_result=$func_cygpath_result + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_msys_to_cygwin + + +# func_convert_file_nix_to_cygwin ARG +# Convert file name ARG from *nix to Cygwin format. Requires Cygwin installed +# in a wine environment, working winepath, and LT_CYGPATH set. Returns result +# in func_to_host_file_result. +func_convert_file_nix_to_cygwin () +{ + $debug_cmd + + func_to_host_file_result=$1 + if test -n "$1"; then + # convert from *nix to w32, then use cygpath to convert from w32 to cygwin. + func_convert_core_file_wine_to_w32 "$1" + func_cygpath -u "$func_convert_core_file_wine_to_w32_result" + func_to_host_file_result=$func_cygpath_result + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_nix_to_cygwin + + +############################################# +# $build to $host PATH CONVERSION FUNCTIONS # +############################################# +# invoked via '$to_host_path_cmd ARG' +# +# In each case, ARG is the path to be converted from $build to $host format. +# The result will be available in $func_to_host_path_result. 
+# +# Path separators are also converted from $build format to $host format. If +# ARG begins or ends with a path separator character, it is preserved (but +# converted to $host format) on output. +# +# All path conversion functions are named using the following convention: +# file name conversion function : func_convert_file_X_to_Y () +# path conversion function : func_convert_path_X_to_Y () +# where, for any given $build/$host combination the 'X_to_Y' value is the +# same. If conversion functions are added for new $build/$host combinations, +# the two new functions must follow this pattern, or func_init_to_host_path_cmd +# will break. + + +# func_init_to_host_path_cmd +# Ensures that function "pointer" variable $to_host_path_cmd is set to the +# appropriate value, based on the value of $to_host_file_cmd. +to_host_path_cmd= +func_init_to_host_path_cmd () +{ + $debug_cmd + + if test -z "$to_host_path_cmd"; then + func_stripname 'func_convert_file_' '' "$to_host_file_cmd" + to_host_path_cmd=func_convert_path_$func_stripname_result + fi +} + + +# func_to_host_path ARG +# Converts the path ARG from $build format to $host format. Return result +# in func_to_host_path_result. +func_to_host_path () +{ + $debug_cmd + + func_init_to_host_path_cmd + $to_host_path_cmd "$1" +} +# end func_to_host_path + + +# func_convert_path_noop ARG +# Copy ARG to func_to_host_path_result. +func_convert_path_noop () +{ + func_to_host_path_result=$1 +} +# end func_convert_path_noop + + +# func_convert_path_msys_to_w32 ARG +# Convert path ARG from (mingw) MSYS to (mingw) w32 format; automatic +# conversion to w32 is not available inside the cwrapper. Returns result in +# func_to_host_path_result. +func_convert_path_msys_to_w32 () +{ + $debug_cmd + + func_to_host_path_result=$1 + if test -n "$1"; then + # Remove leading and trailing path separator characters from ARG. MSYS + # behavior is inconsistent here; cygpath turns them into '.;' and ';.'; + # and winepath ignores them completely. + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_msys_to_w32 "$func_to_host_path_tmp1" + func_to_host_path_result=$func_convert_core_msys_to_w32_result + func_convert_path_check : ";" \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" + fi +} +# end func_convert_path_msys_to_w32 + + +# func_convert_path_cygwin_to_w32 ARG +# Convert path ARG from Cygwin to w32 format. Returns result in +# func_to_host_file_result. +func_convert_path_cygwin_to_w32 () +{ + $debug_cmd + + func_to_host_path_result=$1 + if test -n "$1"; then + # See func_convert_path_msys_to_w32: + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_to_host_path_result=`cygpath -m -p "$func_to_host_path_tmp1"` + func_convert_path_check : ";" \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" + fi +} +# end func_convert_path_cygwin_to_w32 + + +# func_convert_path_nix_to_w32 ARG +# Convert path ARG from *nix to w32 format. Requires a wine environment and +# a working winepath. Returns result in func_to_host_file_result. 
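+# Illustrative sketch (hypothetical values): a $build path such as
+#
+#   /usr/lib:/usr/local/lib
+#
+# typically comes back from this family of functions as something like
+# 'Z:\usr\lib;Z:\usr\local\lib' -- each component converted file-name
+# style, the ':' separators rewritten to ';', and any leading/trailing
+# separator preserved by func_convert_path_front_back_pathsep.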
+func_convert_path_nix_to_w32 () +{ + $debug_cmd + + func_to_host_path_result=$1 + if test -n "$1"; then + # See func_convert_path_msys_to_w32: + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1" + func_to_host_path_result=$func_convert_core_path_wine_to_w32_result + func_convert_path_check : ";" \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" + fi +} +# end func_convert_path_nix_to_w32 + + +# func_convert_path_msys_to_cygwin ARG +# Convert path ARG from MSYS to Cygwin format. Requires LT_CYGPATH set. +# Returns result in func_to_host_file_result. +func_convert_path_msys_to_cygwin () +{ + $debug_cmd + + func_to_host_path_result=$1 + if test -n "$1"; then + # See func_convert_path_msys_to_w32: + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_msys_to_w32 "$func_to_host_path_tmp1" + func_cygpath -u -p "$func_convert_core_msys_to_w32_result" + func_to_host_path_result=$func_cygpath_result + func_convert_path_check : : \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" : "$1" + fi +} +# end func_convert_path_msys_to_cygwin + + +# func_convert_path_nix_to_cygwin ARG +# Convert path ARG from *nix to Cygwin format. Requires Cygwin installed in a +# a wine environment, working winepath, and LT_CYGPATH set. Returns result in +# func_to_host_file_result. +func_convert_path_nix_to_cygwin () +{ + $debug_cmd + + func_to_host_path_result=$1 + if test -n "$1"; then + # Remove leading and trailing path separator characters from + # ARG. msys behavior is inconsistent here, cygpath turns them + # into '.;' and ';.', and winepath ignores them completely. + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1" + func_cygpath -u -p "$func_convert_core_path_wine_to_w32_result" + func_to_host_path_result=$func_cygpath_result + func_convert_path_check : : \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" : "$1" + fi +} +# end func_convert_path_nix_to_cygwin + + +# func_dll_def_p FILE +# True iff FILE is a Windows DLL '.def' file. +# Keep in sync with _LT_DLL_DEF_P in libtool.m4 +func_dll_def_p () +{ + $debug_cmd + + func_dll_def_p_tmp=`$SED -n \ + -e 's/^[ ]*//' \ + -e '/^\(;.*\)*$/d' \ + -e 's/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p' \ + -e q \ + "$1"` + test DEF = "$func_dll_def_p_tmp" +} + + +# func_mode_compile arg... +func_mode_compile () +{ + $debug_cmd + + # Get the compilation command and the source file. + base_compile= + srcfile=$nonopt # always keep a non-empty value in "srcfile" + suppress_opt=yes + suppress_output= + arg_mode=normal + libobj= + later= + pie_flag= + + for arg + do + case $arg_mode in + arg ) + # do not "continue". Instead, add this to base_compile + lastarg=$arg + arg_mode=normal + ;; + + target ) + libobj=$arg + arg_mode=normal + continue + ;; + + normal ) + # Accept any command-line options. 
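+      # Sketch of a typical pass through this loop: for
+      #
+      #   libtool --mode=compile gcc -c foo.c
+      #
+      # $srcfile starts out as $nonopt ('gcc'); '-c' falls through to the
+      # default branch below, pushing 'gcc' into $base_compile, and then
+      # 'foo.c' does the same with '-c', leaving foo.c as the final
+      # $srcfile from which $libobj (foo.lo) is derived.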
+ case $arg in + -o) + test -n "$libobj" && \ + func_fatal_error "you cannot specify '-o' more than once" + arg_mode=target + continue + ;; + + -pie | -fpie | -fPIE) + func_append pie_flag " $arg" + continue + ;; + + -shared | -static | -prefer-pic | -prefer-non-pic) + func_append later " $arg" + continue + ;; + + -no-suppress) + suppress_opt=no + continue + ;; + + -Xcompiler) + arg_mode=arg # the next one goes into the "base_compile" arg list + continue # The current "srcfile" will either be retained or + ;; # replaced later. I would guess that would be a bug. + + -Wc,*) + func_stripname '-Wc,' '' "$arg" + args=$func_stripname_result + lastarg= + save_ifs=$IFS; IFS=, + for arg in $args; do + IFS=$save_ifs + func_append_quoted lastarg "$arg" + done + IFS=$save_ifs + func_stripname ' ' '' "$lastarg" + lastarg=$func_stripname_result + + # Add the arguments to base_compile. + func_append base_compile " $lastarg" + continue + ;; + + *) + # Accept the current argument as the source file. + # The previous "srcfile" becomes the current argument. + # + lastarg=$srcfile + srcfile=$arg + ;; + esac # case $arg + ;; + esac # case $arg_mode + + # Aesthetically quote the previous argument. + func_append_quoted base_compile "$lastarg" + done # for arg + + case $arg_mode in + arg) + func_fatal_error "you must specify an argument for -Xcompile" + ;; + target) + func_fatal_error "you must specify a target with '-o'" + ;; + *) + # Get the name of the library object. + test -z "$libobj" && { + func_basename "$srcfile" + libobj=$func_basename_result + } + ;; + esac + + # Recognize several different file suffixes. + # If the user specifies -o file.o, it is replaced with file.lo + case $libobj in + *.[cCFSifmso] | \ + *.ada | *.adb | *.ads | *.asm | \ + *.c++ | *.cc | *.ii | *.class | *.cpp | *.cxx | \ + *.[fF][09]? | *.for | *.java | *.go | *.obj | *.sx | *.cu | *.cup) + func_xform "$libobj" + libobj=$func_xform_result + ;; + esac + + case $libobj in + *.lo) func_lo2o "$libobj"; obj=$func_lo2o_result ;; + *) + func_fatal_error "cannot determine name of library object from '$libobj'" + ;; + esac + + func_infer_tag $base_compile + + for arg in $later; do + case $arg in + -shared) + test yes = "$build_libtool_libs" \ + || func_fatal_configuration "cannot build a shared library" + build_old_libs=no + continue + ;; + + -static) + build_libtool_libs=no + build_old_libs=yes + continue + ;; + + -prefer-pic) + pic_mode=yes + continue + ;; + + -prefer-non-pic) + pic_mode=no + continue + ;; + esac + done + + func_quote_arg pretty "$libobj" + test "X$libobj" != "X$func_quote_arg_result" \ + && $ECHO "X$libobj" | $GREP '[]~#^*{};<>?"'"'"' &()|`$[]' \ + && func_warning "libobj name '$libobj' may not contain shell special characters." + func_dirname_and_basename "$obj" "/" "" + objname=$func_basename_result + xdir=$func_dirname_result + lobj=$xdir$objdir/$objname + + test -z "$base_compile" && \ + func_fatal_help "you must specify a compilation command" + + # Delete any leftover library objects. 
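+  # For example, with the usual objdir of .libs, compiling foo.c in the
+  # current directory yields removelist='foo.o .libs/foo.o foo.lo foo.loT'
+  # here, or '.libs/foo.o foo.lo foo.loT' when static (old) libraries are
+  # not being built.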
+ if test yes = "$build_old_libs"; then + removelist="$obj $lobj $libobj ${libobj}T" + else + removelist="$lobj $libobj ${libobj}T" + fi + + # On Cygwin there's no "real" PIC flag so we must build both object types + case $host_os in + cygwin* | mingw* | pw32* | os2* | cegcc*) + pic_mode=default + ;; + esac + if test no = "$pic_mode" && test pass_all != "$deplibs_check_method"; then + # non-PIC code in shared libraries is not supported + pic_mode=default + fi + + # Calculate the filename of the output object if compiler does + # not support -o with -c + if test no = "$compiler_c_o"; then + output_obj=`$ECHO "$srcfile" | $SED 's%^.*/%%; s%\.[^.]*$%%'`.$objext + lockfile=$output_obj.lock + else + output_obj= + need_locks=no + lockfile= + fi + + # Lock this critical section if it is needed + # We use this script file to make the link, it avoids creating a new file + if test yes = "$need_locks"; then + until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do + func_echo "Waiting for $lockfile to be removed" + sleep 2 + done + elif test warn = "$need_locks"; then + if test -f "$lockfile"; then + $ECHO "\ +*** ERROR, $lockfile exists and contains: +`cat $lockfile 2>/dev/null` + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support '-c' and '-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi + func_append removelist " $output_obj" + $ECHO "$srcfile" > "$lockfile" + fi + + $opt_dry_run || $RM $removelist + func_append removelist " $lockfile" + trap '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' 1 2 15 + + func_to_tool_file "$srcfile" func_convert_file_msys_to_w32 + srcfile=$func_to_tool_file_result + func_quote_arg pretty "$srcfile" + qsrcfile=$func_quote_arg_result + + # Only build a PIC object if we are building libtool libraries. + if test yes = "$build_libtool_libs"; then + # Without this assignment, base_compile gets emptied. + fbsd_hideous_sh_bug=$base_compile + + if test no != "$pic_mode"; then + command="$base_compile $qsrcfile $pic_flag" + else + # Don't build PIC code + command="$base_compile $qsrcfile" + fi + + func_mkdir_p "$xdir$objdir" + + if test -z "$output_obj"; then + # Place PIC objects in $objdir + func_append command " -o $lobj" + fi + + func_show_eval_locale "$command" \ + 'test -n "$output_obj" && $RM $removelist; exit $EXIT_FAILURE' + + if test warn = "$need_locks" && + test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then + $ECHO "\ +*** ERROR, $lockfile contains: +`cat $lockfile 2>/dev/null` + +but it should contain: +$srcfile + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support '-c' and '-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi + + # Just move the object if needed, then go on to compile the next one + if test -n "$output_obj" && test "X$output_obj" != "X$lobj"; then + func_show_eval '$MV "$output_obj" "$lobj"' \ + 'error=$?; $opt_dry_run || $RM $removelist; exit $error' + fi + + # Allow error messages only from the first compilation. 
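+  # The PIC pass above has already shown any compiler diagnostics, so
+  # unless -no-suppress was given, the second (non-PIC) pass below has
+  # its output redirected through $suppress_output.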
+ if test yes = "$suppress_opt"; then + suppress_output=' >/dev/null 2>&1' + fi + fi + + # Only build a position-dependent object if we build old libraries. + if test yes = "$build_old_libs"; then + if test yes != "$pic_mode"; then + # Don't build PIC code + command="$base_compile $qsrcfile$pie_flag" + else + command="$base_compile $qsrcfile $pic_flag" + fi + if test yes = "$compiler_c_o"; then + func_append command " -o $obj" + fi + + # Suppress compiler output if we already did a PIC compilation. + func_append command "$suppress_output" + func_show_eval_locale "$command" \ + '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' + + if test warn = "$need_locks" && + test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then + $ECHO "\ +*** ERROR, $lockfile contains: +`cat $lockfile 2>/dev/null` + +but it should contain: +$srcfile + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support '-c' and '-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi + + # Just move the object if needed + if test -n "$output_obj" && test "X$output_obj" != "X$obj"; then + func_show_eval '$MV "$output_obj" "$obj"' \ + 'error=$?; $opt_dry_run || $RM $removelist; exit $error' + fi + fi + + $opt_dry_run || { + func_write_libtool_object "$libobj" "$objdir/$objname" "$objname" + + # Unlock the critical section if it was locked + if test no != "$need_locks"; then + removelist=$lockfile + $RM "$lockfile" + fi + } + + exit $EXIT_SUCCESS +} + +$opt_help || { + test compile = "$opt_mode" && func_mode_compile ${1+"$@"} +} + +func_mode_help () +{ + # We need to display help for each of the modes. + case $opt_mode in + "") + # Generic help is extracted from the usage comments + # at the start of this file. + func_help + ;; + + clean) + $ECHO \ +"Usage: $progname [OPTION]... --mode=clean RM [RM-OPTION]... FILE... + +Remove files from the build directory. + +RM is the name of the program to use to delete files associated with each FILE +(typically '/bin/rm'). RM-OPTIONS are options (such as '-f') to be passed +to RM. + +If FILE is a libtool library, object or program, all the files associated +with it are deleted. Otherwise, only FILE itself is deleted using RM." + ;; + + compile) + $ECHO \ +"Usage: $progname [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE + +Compile a source file into a libtool library object. + +This mode accepts the following additional options: + + -o OUTPUT-FILE set the output file name to OUTPUT-FILE + -no-suppress do not suppress compiler output for multiple passes + -prefer-pic try to build PIC objects only + -prefer-non-pic try to build non-PIC objects only + -shared do not build a '.o' file suitable for static linking + -static only build a '.o' file suitable for static linking + -Wc,FLAG + -Xcompiler FLAG pass FLAG directly to the compiler + +COMPILE-COMMAND is a command to be used in creating a 'standard' object file +from the given SOURCEFILE. + +The output file name is determined by removing the directory component from +SOURCEFILE, then substituting the C source code suffix '.c' with the +library object suffix, '.lo'." + ;; + + execute) + $ECHO \ +"Usage: $progname [OPTION]... --mode=execute COMMAND [ARGS]... + +Automatically set library path, then run a program. 
+ +This mode accepts the following additional options: + + -dlopen FILE add the directory containing FILE to the library path + +This mode sets the library path environment variable according to '-dlopen' +flags. + +If any of the ARGS are libtool executable wrappers, then they are translated +into their corresponding uninstalled binary, and any of their required library +directories are added to the library path. + +Then, COMMAND is executed, with ARGS as arguments." + ;; + + finish) + $ECHO \ +"Usage: $progname [OPTION]... --mode=finish [LIBDIR]... + +Complete the installation of libtool libraries. + +Each LIBDIR is a directory that contains libtool libraries. + +The commands that this mode executes may require superuser privileges. Use +the '--dry-run' option if you just want to see what would be executed." + ;; + + install) + $ECHO \ +"Usage: $progname [OPTION]... --mode=install INSTALL-COMMAND... + +Install executables or libraries. + +INSTALL-COMMAND is the installation command. The first component should be +either the 'install' or 'cp' program. + +The following components of INSTALL-COMMAND are treated specially: + + -inst-prefix-dir PREFIX-DIR Use PREFIX-DIR as a staging area for installation + +The rest of the components are interpreted as arguments to that command (only +BSD-compatible install options are recognized)." + ;; + + link) + $ECHO \ +"Usage: $progname [OPTION]... --mode=link LINK-COMMAND... + +Link object files or libraries together to form another library, or to +create an executable program. + +LINK-COMMAND is a command using the C compiler that you would use to create +a program from several object files. + +The following components of LINK-COMMAND are treated specially: + + -all-static do not do any dynamic linking at all + -avoid-version do not add a version suffix if possible + -bindir BINDIR specify path to binaries directory (for systems where + libraries must be found in the PATH setting at runtime) + -dlopen FILE '-dlpreopen' FILE if it cannot be dlopened at runtime + -dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols + -export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3) + -export-symbols SYMFILE + try to export only the symbols listed in SYMFILE + -export-symbols-regex REGEX + try to export only the symbols matching REGEX + -LLIBDIR search LIBDIR for required installed libraries + -lNAME OUTPUT-FILE requires the installed library libNAME + -module build a library that can dlopened + -no-fast-install disable the fast-install mode + -no-install link a not-installable executable + -no-undefined declare that a library does not refer to external symbols + -o OUTPUT-FILE create OUTPUT-FILE from the specified objects + -objectlist FILE use a list of object files found in FILE to specify objects + -os2dllname NAME force a short DLL name on OS/2 (no effect on other OSes) + -precious-files-regex REGEX + don't remove output files matching REGEX + -release RELEASE specify package release information + -rpath LIBDIR the created library will eventually be installed in LIBDIR + -R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries + -shared only do dynamic linking of libtool libraries + -shrext SUFFIX override the standard shared library file extension + -static do not do any dynamic linking of uninstalled libtool libraries + -static-libtool-libs + do not do any dynamic linking of libtool libraries + -version-info CURRENT[:REVISION[:AGE]] + specify library version info [each variable defaults to 0] + -weak 
LIBNAME declare that the target provides the LIBNAME interface + -Wc,FLAG + -Xcompiler FLAG pass linker-specific FLAG directly to the compiler + -Wa,FLAG + -Xassembler FLAG pass linker-specific FLAG directly to the assembler + -Wl,FLAG + -Xlinker FLAG pass linker-specific FLAG directly to the linker + -XCClinker FLAG pass link-specific FLAG to the compiler driver (CC) + +All other options (arguments beginning with '-') are ignored. + +Every other argument is treated as a filename. Files ending in '.la' are +treated as uninstalled libtool libraries, other files are standard or library +object files. + +If the OUTPUT-FILE ends in '.la', then a libtool library is created, +only library objects ('.lo' files) may be specified, and '-rpath' is +required, except when creating a convenience library. + +If OUTPUT-FILE ends in '.a' or '.lib', then a standard library is created +using 'ar' and 'ranlib', or on Windows using 'lib'. + +If OUTPUT-FILE ends in '.lo' or '.$objext', then a reloadable object file +is created, otherwise an executable program is created." + ;; + + uninstall) + $ECHO \ +"Usage: $progname [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE... + +Remove libraries from an installation directory. + +RM is the name of the program to use to delete files associated with each FILE +(typically '/bin/rm'). RM-OPTIONS are options (such as '-f') to be passed +to RM. + +If FILE is a libtool library, all the files associated with it are deleted. +Otherwise, only FILE itself is deleted using RM." + ;; + + *) + func_fatal_help "invalid operation mode '$opt_mode'" + ;; + esac + + echo + $ECHO "Try '$progname --help' for more information about other modes." +} + +# Now that we've collected a possible --mode arg, show help if necessary +if $opt_help; then + if test : = "$opt_help"; then + func_mode_help + else + { + func_help noexit + for opt_mode in compile link execute install finish uninstall clean; do + func_mode_help + done + } | $SED -n '1p; 2,$s/^Usage:/ or: /p' + { + func_help noexit + for opt_mode in compile link execute install finish uninstall clean; do + echo + func_mode_help + done + } | + $SED '1d + /^When reporting/,/^Report/{ + H + d + } + $x + /information about other modes/d + /more detailed .*MODE/d + s/^Usage:.*--mode=\([^ ]*\) .*/Description of \1 mode:/' + fi + exit $? +fi + + +# func_mode_execute arg... +func_mode_execute () +{ + $debug_cmd + + # The first argument is the command name. + cmd=$nonopt + test -z "$cmd" && \ + func_fatal_help "you must specify a COMMAND" + + # Handle -dlopen flags immediately. + for file in $opt_dlopen; do + test -f "$file" \ + || func_fatal_help "'$file' is not a file" + + dir= + case $file in + *.la) + func_resolve_sysroot "$file" + file=$func_resolve_sysroot_result + + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$file" \ + || func_fatal_help "'$lib' is not a valid libtool archive" + + # Read the libtool library. + dlname= + library_names= + func_source "$file" + + # Skip this library if it cannot be dlopened. + if test -z "$dlname"; then + # Warn if it was a shared library. + test -n "$library_names" && \ + func_warning "'$file' was not linked with '-export-dynamic'" + continue + fi + + func_dirname "$file" "" "." + dir=$func_dirname_result + + if test -f "$dir/$objdir/$dlname"; then + func_append dir "/$objdir" + else + if test ! -f "$dir/$dlname"; then + func_fatal_error "cannot find '$dlname' in '$dir' or '$dir/$objdir'" + fi + fi + ;; + + *.lo) + # Just add the directory containing the .lo file. 
+ func_dirname "$file" "" "." + dir=$func_dirname_result + ;; + + *) + func_warning "'-dlopen' is ignored for non-libtool libraries and objects" + continue + ;; + esac + + # Get the absolute pathname. + absdir=`cd "$dir" && pwd` + test -n "$absdir" && dir=$absdir + + # Now add the directory to shlibpath_var. + if eval "test -z \"\$$shlibpath_var\""; then + eval "$shlibpath_var=\"\$dir\"" + else + eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\"" + fi + done + + # This variable tells wrapper scripts just to set shlibpath_var + # rather than running their programs. + libtool_execute_magic=$magic + + # Check if any of the arguments is a wrapper script. + args= + for file + do + case $file in + -* | *.la | *.lo ) ;; + *) + # Do a test to see if this is really a libtool program. + if func_ltwrapper_script_p "$file"; then + func_source "$file" + # Transform arg to wrapped name. + file=$progdir/$program + elif func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + func_source "$func_ltwrapper_scriptname_result" + # Transform arg to wrapped name. + file=$progdir/$program + fi + ;; + esac + # Quote arguments (to preserve shell metacharacters). + func_append_quoted args "$file" + done + + if $opt_dry_run; then + # Display what would be done. + if test -n "$shlibpath_var"; then + eval "\$ECHO \"\$shlibpath_var=\$$shlibpath_var\"" + echo "export $shlibpath_var" + fi + $ECHO "$cmd$args" + exit $EXIT_SUCCESS + else + if test -n "$shlibpath_var"; then + # Export the shlibpath_var. + eval "export $shlibpath_var" + fi + + # Restore saved environment variables + for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES + do + eval "if test \"\${save_$lt_var+set}\" = set; then + $lt_var=\$save_$lt_var; export $lt_var + else + $lt_unset $lt_var + fi" + done + + # Now prepare to actually exec the command. + exec_cmd=\$cmd$args + fi +} + +test execute = "$opt_mode" && func_mode_execute ${1+"$@"} + + +# func_mode_finish arg... +func_mode_finish () +{ + $debug_cmd + + libs= + libdirs= + admincmds= + + for opt in "$nonopt" ${1+"$@"} + do + if test -d "$opt"; then + func_append libdirs " $opt" + + elif test -f "$opt"; then + if func_lalib_unsafe_p "$opt"; then + func_append libs " $opt" + else + func_warning "'$opt' is not a valid libtool archive" + fi + + else + func_fatal_error "invalid argument '$opt'" + fi + done + + if test -n "$libs"; then + if test -n "$lt_sysroot"; then + sysroot_regex=`$ECHO "$lt_sysroot" | $SED "$sed_make_literal_regex"` + sysroot_cmd="s/\([ ']\)$sysroot_regex/\1/g;" + else + sysroot_cmd= + fi + + # Remove sysroot references + if $opt_dry_run; then + for lib in $libs; do + echo "removing references to $lt_sysroot and '=' prefixes from $lib" + done + else + tmpdir=`func_mktempdir` + for lib in $libs; do + $SED -e "$sysroot_cmd s/\([ ']-[LR]\)=/\1/g; s/\([ ']\)=/\1/g" $lib \ + > $tmpdir/tmp-la + mv -f $tmpdir/tmp-la $lib + done + ${RM}r "$tmpdir" + fi + fi + + if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then + for libdir in $libdirs; do + if test -n "$finish_cmds"; then + # Do each command in the finish commands. + func_execute_cmds "$finish_cmds" 'admincmds="$admincmds +'"$cmd"'"' + fi + if test -n "$finish_eval"; then + # Do the single finish_eval. + eval cmds=\"$finish_eval\" + $opt_dry_run || eval "$cmds" || func_append admincmds " + $cmds" + fi + done + fi + + # Exit here if they wanted silent mode. 
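+ # Illustrative sketch (the directory name is hypothetical): a call such as
+ #   libtool --mode=finish /usr/local/lib
+ # runs the platform's $finish_cmds (for instance ldconfig on GNU/Linux) for
+ # each LIBDIR given and then prints the linking guidance that follows,
+ # unless silent mode was requested: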
+ $opt_quiet && exit $EXIT_SUCCESS + + if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then + echo "----------------------------------------------------------------------" + echo "Libraries have been installed in:" + for libdir in $libdirs; do + $ECHO " $libdir" + done + echo + echo "If you ever happen to want to link against installed libraries" + echo "in a given directory, LIBDIR, you must either use libtool, and" + echo "specify the full pathname of the library, or use the '-LLIBDIR'" + echo "flag during linking and do at least one of the following:" + if test -n "$shlibpath_var"; then + echo " - add LIBDIR to the '$shlibpath_var' environment variable" + echo " during execution" + fi + if test -n "$runpath_var"; then + echo " - add LIBDIR to the '$runpath_var' environment variable" + echo " during linking" + fi + if test -n "$hardcode_libdir_flag_spec"; then + libdir=LIBDIR + eval flag=\"$hardcode_libdir_flag_spec\" + + $ECHO " - use the '$flag' linker flag" + fi + if test -n "$admincmds"; then + $ECHO " - have your system administrator run these commands:$admincmds" + fi + if test -f /etc/ld.so.conf; then + echo " - have your system administrator add LIBDIR to '/etc/ld.so.conf'" + fi + echo + + echo "See any operating system documentation about shared libraries for" + case $host in + solaris2.[6789]|solaris2.1[0-9]) + echo "more information, such as the ld(1), crle(1) and ld.so(8) manual" + echo "pages." + ;; + *) + echo "more information, such as the ld(1) and ld.so(8) manual pages." + ;; + esac + echo "----------------------------------------------------------------------" + fi + exit $EXIT_SUCCESS +} + +test finish = "$opt_mode" && func_mode_finish ${1+"$@"} + + +# func_mode_install arg... +func_mode_install () +{ + $debug_cmd + + # There may be an optional sh(1) argument at the beginning of + # install_prog (especially on Windows NT). + if test "$SHELL" = "$nonopt" || test /bin/sh = "$nonopt" || + # Allow the use of GNU shtool's install command. + case $nonopt in *shtool*) :;; *) false;; esac + then + # Aesthetically quote it. + func_quote_arg pretty "$nonopt" + install_prog="$func_quote_arg_result " + arg=$1 + shift + else + install_prog= + arg=$nonopt + fi + + # The real first argument should be the name of the installation program. + # Aesthetically quote it. + func_quote_arg pretty "$arg" + func_append install_prog "$func_quote_arg_result" + install_shared_prog=$install_prog + case " $install_prog " in + *[\\\ /]cp\ *) install_cp=: ;; + *) install_cp=false ;; + esac + + # We need to accept at least all the BSD install flags. + dest= + files= + opts= + prev= + install_type= + isdir=false + stripme= + no_mode=: + for arg + do + arg2= + if test -n "$dest"; then + func_append files " $dest" + dest=$arg + continue + fi + + case $arg in + -d) isdir=: ;; + -f) + if $install_cp; then :; else + prev=$arg + fi + ;; + -g | -m | -o) + prev=$arg + ;; + -s) + stripme=" -s" + continue + ;; + -*) + ;; + *) + # If the previous option needed an argument, then skip it. + if test -n "$prev"; then + if test X-m = "X$prev" && test -n "$install_override_mode"; then + arg2=$install_override_mode + no_mode=false + fi + prev= + else + dest=$arg + continue + fi + ;; + esac + + # Aesthetically quote the argument. 
+ func_quote_arg pretty "$arg" + func_append install_prog " $func_quote_arg_result" + if test -n "$arg2"; then + func_quote_arg pretty "$arg2" + fi + func_append install_shared_prog " $func_quote_arg_result" + done + + test -z "$install_prog" && \ + func_fatal_help "you must specify an install program" + + test -n "$prev" && \ + func_fatal_help "the '$prev' option requires an argument" + + if test -n "$install_override_mode" && $no_mode; then + if $install_cp; then :; else + func_quote_arg pretty "$install_override_mode" + func_append install_shared_prog " -m $func_quote_arg_result" + fi + fi + + if test -z "$files"; then + if test -z "$dest"; then + func_fatal_help "no file or destination specified" + else + func_fatal_help "you must specify a destination" + fi + fi + + # Strip any trailing slash from the destination. + func_stripname '' '/' "$dest" + dest=$func_stripname_result + + # Check to see that the destination is a directory. + test -d "$dest" && isdir=: + if $isdir; then + destdir=$dest + destname= + else + func_dirname_and_basename "$dest" "" "." + destdir=$func_dirname_result + destname=$func_basename_result + + # Not a directory, so check to see that there is only one file specified. + set dummy $files; shift + test "$#" -gt 1 && \ + func_fatal_help "'$dest' is not a directory" + fi + case $destdir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + for file in $files; do + case $file in + *.lo) ;; + *) + func_fatal_help "'$destdir' must be an absolute directory name" + ;; + esac + done + ;; + esac + + # This variable tells wrapper scripts just to set variables rather + # than running their programs. + libtool_install_magic=$magic + + staticlibs= + future_libdirs= + current_libdirs= + for file in $files; do + + # Do each installation. + case $file in + *.$libext) + # Do the static libraries later. + func_append staticlibs " $file" + ;; + + *.la) + func_resolve_sysroot "$file" + file=$func_resolve_sysroot_result + + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$file" \ + || func_fatal_help "'$file' is not a valid libtool archive" + + library_names= + old_library= + relink_command= + func_source "$file" + + # Add the libdir to current_libdirs if it is the destination. + if test "X$destdir" = "X$libdir"; then + case "$current_libdirs " in + *" $libdir "*) ;; + *) func_append current_libdirs " $libdir" ;; + esac + else + # Note the libdir as a future libdir. + case "$future_libdirs " in + *" $libdir "*) ;; + *) func_append future_libdirs " $libdir" ;; + esac + fi + + func_dirname "$file" "/" "" + dir=$func_dirname_result + func_append dir "$objdir" + + if test -n "$relink_command"; then + # Determine the prefix the user has applied to our future dir. + inst_prefix_dir=`$ECHO "$destdir" | $SED -e "s%$libdir\$%%"` + + # Don't allow the user to place us outside of our expected + # location b/c this prevents finding dependent libraries that + # are installed to the same prefix. + # At present, this check doesn't affect windows .dll's that + # are installed into $libdir/../bin (currently, that works fine) + # but it's something to keep an eye on. + test "$inst_prefix_dir" = "$destdir" && \ + func_fatal_error "error: cannot install '$file' to a directory not ending in $libdir" + + if test -n "$inst_prefix_dir"; then + # Stick the inst_prefix_dir data into the link command. 
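+ # Illustrative sketch (paths are hypothetical): an install-mode call such as
+ #   libtool --mode=install /usr/bin/install -c libfoo.la /usr/local/lib/libfoo.la
+ # copies the shared library, its symlinks, the .la file and (if built) the
+ # static archive, then runs or suggests 'libtool --finish' for the
+ # destination libdir; the stored relink command is rewritten below so the
+ # library can first be relinked against its installed prefix.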
+ relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"` + else + relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%%"` + fi + + func_warning "relinking '$file'" + func_show_eval "$relink_command" \ + 'func_fatal_error "error: relink '\''$file'\'' with the above command before installing it"' + fi + + # See the names of the shared library. + set dummy $library_names; shift + if test -n "$1"; then + realname=$1 + shift + + srcname=$realname + test -n "$relink_command" && srcname=${realname}T + + # Install the shared library and build the symlinks. + func_show_eval "$install_shared_prog $dir/$srcname $destdir/$realname" \ + 'exit $?' + tstripme=$stripme + case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + case $realname in + *.dll.a) + tstripme= + ;; + esac + ;; + os2*) + case $realname in + *_dll.a) + tstripme= + ;; + esac + ;; + esac + if test -n "$tstripme" && test -n "$striplib"; then + func_show_eval "$striplib $destdir/$realname" 'exit $?' + fi + + if test "$#" -gt 0; then + # Delete the old symlinks, and create new ones. + # Try 'ln -sf' first, because the 'ln' binary might depend on + # the symlink we replace! Solaris /bin/ln does not understand -f, + # so we also need to try rm && ln -s. + for linkname + do + test "$linkname" != "$realname" \ + && func_show_eval "(cd $destdir && { $LN_S -f $realname $linkname || { $RM $linkname && $LN_S $realname $linkname; }; })" + done + fi + + # Do each command in the postinstall commands. + lib=$destdir/$realname + func_execute_cmds "$postinstall_cmds" 'exit $?' + fi + + # Install the pseudo-library for information purposes. + func_basename "$file" + name=$func_basename_result + instname=$dir/${name}i + func_show_eval "$install_prog $instname $destdir/$name" 'exit $?' + + # Maybe install the static library, too. + test -n "$old_library" && func_append staticlibs " $dir/$old_library" + ;; + + *.lo) + # Install (i.e. copy) a libtool object. + + # Figure out destination file name, if it wasn't already specified. + if test -n "$destname"; then + destfile=$destdir/$destname + else + func_basename "$file" + destfile=$func_basename_result + destfile=$destdir/$destfile + fi + + # Deduce the name of the destination old-style object file. + case $destfile in + *.lo) + func_lo2o "$destfile" + staticdest=$func_lo2o_result + ;; + *.$objext) + staticdest=$destfile + destfile= + ;; + *) + func_fatal_help "cannot copy a libtool object to '$destfile'" + ;; + esac + + # Install the libtool object if requested. + test -n "$destfile" && \ + func_show_eval "$install_prog $file $destfile" 'exit $?' + + # Install the old object if enabled. + if test yes = "$build_old_libs"; then + # Deduce the name of the old-style object file. + func_lo2o "$file" + staticobj=$func_lo2o_result + func_show_eval "$install_prog \$staticobj \$staticdest" 'exit $?' + fi + exit $EXIT_SUCCESS + ;; + + *) + # Figure out destination file name, if it wasn't already specified. + if test -n "$destname"; then + destfile=$destdir/$destname + else + func_basename "$file" + destfile=$func_basename_result + destfile=$destdir/$destfile + fi + + # If the file is missing, and there is a .exe on the end, strip it + # because it is most likely a libtool script we actually want to + # install + stripped_ext= + case $file in + *.exe) + if test ! -f "$file"; then + func_stripname '' '.exe' "$file" + file=$func_stripname_result + stripped_ext=.exe + fi + ;; + esac + + # Do a test to see if this is really a libtool program. 
+ case $host in + *cygwin* | *mingw*) + if func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + wrapper=$func_ltwrapper_scriptname_result + else + func_stripname '' '.exe' "$file" + wrapper=$func_stripname_result + fi + ;; + *) + wrapper=$file + ;; + esac + if func_ltwrapper_script_p "$wrapper"; then + notinst_deplibs= + relink_command= + + func_source "$wrapper" + + # Check the variables that should have been set. + test -z "$generated_by_libtool_version" && \ + func_fatal_error "invalid libtool wrapper script '$wrapper'" + + finalize=: + for lib in $notinst_deplibs; do + # Check to see that each library is installed. + libdir= + if test -f "$lib"; then + func_source "$lib" + fi + libfile=$libdir/`$ECHO "$lib" | $SED 's%^.*/%%g'` + if test -n "$libdir" && test ! -f "$libfile"; then + func_warning "'$lib' has not been installed in '$libdir'" + finalize=false + fi + done + + relink_command= + func_source "$wrapper" + + outputname= + if test no = "$fast_install" && test -n "$relink_command"; then + $opt_dry_run || { + if $finalize; then + tmpdir=`func_mktempdir` + func_basename "$file$stripped_ext" + file=$func_basename_result + outputname=$tmpdir/$file + # Replace the output file specification. + relink_command=`$ECHO "$relink_command" | $SED 's%@OUTPUT@%'"$outputname"'%g'` + + $opt_quiet || { + func_quote_arg expand,pretty "$relink_command" + eval "func_echo $func_quote_arg_result" + } + if eval "$relink_command"; then : + else + func_error "error: relink '$file' with the above command before installing it" + $opt_dry_run || ${RM}r "$tmpdir" + continue + fi + file=$outputname + else + func_warning "cannot relink '$file'" + fi + } + else + # Install the binary that we compiled earlier. + file=`$ECHO "$file$stripped_ext" | $SED "s%\([^/]*\)$%$objdir/\1%"` + fi + fi + + # remove .exe since cygwin /usr/bin/install will append another + # one anyway + case $install_prog,$host in + */usr/bin/install*,*cygwin*) + case $file:$destfile in + *.exe:*.exe) + # this is ok + ;; + *.exe:*) + destfile=$destfile.exe + ;; + *:*.exe) + func_stripname '' '.exe' "$destfile" + destfile=$func_stripname_result + ;; + esac + ;; + esac + func_show_eval "$install_prog\$stripme \$file \$destfile" 'exit $?' + $opt_dry_run || if test -n "$outputname"; then + ${RM}r "$tmpdir" + fi + ;; + esac + done + + for file in $staticlibs; do + func_basename "$file" + name=$func_basename_result + + # Set up the ranlib parameters. + oldlib=$destdir/$name + func_to_tool_file "$oldlib" func_convert_file_msys_to_w32 + tool_oldlib=$func_to_tool_file_result + + func_show_eval "$install_prog \$file \$oldlib" 'exit $?' + + if test -n "$stripme" && test -n "$old_striplib"; then + func_show_eval "$old_striplib $tool_oldlib" 'exit $?' + fi + + # Do each command in the postinstall commands. + func_execute_cmds "$old_postinstall_cmds" 'exit $?' + done + + test -n "$future_libdirs" && \ + func_warning "remember to run '$progname --finish$future_libdirs'" + + if test -n "$current_libdirs"; then + # Maybe just do a dry run. + $opt_dry_run && current_libdirs=" -n$current_libdirs" + exec_cmd='$SHELL "$progpath" $preserve_args --finish$current_libdirs' + else + exit $EXIT_SUCCESS + fi +} + +test install = "$opt_mode" && func_mode_install ${1+"$@"} + + +# func_generate_dlsyms outputname originator pic_p +# Extract symbols from dlprefiles and create ${outputname}S.o with +# a dlpreopen symbol table. 
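+# Illustrative sketch (program and library names are hypothetical): for a link
+# such as
+#   libtool --mode=link gcc -o prog prog.lo -dlpreopen libfoo.la
+# this function writes progS.c containing an lt_prog_LTX_preloaded_symbols[]
+# table of the global symbols $NM finds in the preopened files, compiles it
+# to progS.o and adds it to the link, so those symbols can be resolved at
+# run time without a real dlopen.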
+func_generate_dlsyms () +{ + $debug_cmd + + my_outputname=$1 + my_originator=$2 + my_pic_p=${3-false} + my_prefix=`$ECHO "$my_originator" | $SED 's%[^a-zA-Z0-9]%_%g'` + my_dlsyms= + + if test -n "$dlfiles$dlprefiles" || test no != "$dlself"; then + if test -n "$NM" && test -n "$global_symbol_pipe"; then + my_dlsyms=${my_outputname}S.c + else + func_error "not configured to extract global symbols from dlpreopened files" + fi + fi + + if test -n "$my_dlsyms"; then + case $my_dlsyms in + "") ;; + *.c) + # Discover the nlist of each of the dlfiles. + nlist=$output_objdir/$my_outputname.nm + + func_show_eval "$RM $nlist ${nlist}S ${nlist}T" + + # Parse the name list into a source file. + func_verbose "creating $output_objdir/$my_dlsyms" + + $opt_dry_run || $ECHO > "$output_objdir/$my_dlsyms" "\ +/* $my_dlsyms - symbol resolution table for '$my_outputname' dlsym emulation. */ +/* Generated by $PROGRAM (GNU $PACKAGE) $VERSION */ + +#ifdef __cplusplus +extern \"C\" { +#endif + +#if defined __GNUC__ && (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 4)) || (__GNUC__ > 4)) +#pragma GCC diagnostic ignored \"-Wstrict-prototypes\" +#endif + +/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ +#if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE +/* DATA imports from DLLs on WIN32 can't be const, because runtime + relocations are performed -- see ld's documentation on pseudo-relocs. */ +# define LT_DLSYM_CONST +#elif defined __osf__ +/* This system does not cope well with relocations in const data. */ +# define LT_DLSYM_CONST +#else +# define LT_DLSYM_CONST const +#endif + +#define STREQ(s1, s2) (strcmp ((s1), (s2)) == 0) + +/* External symbol declarations for the compiler. */\ +" + + if test yes = "$dlself"; then + func_verbose "generating symbol list for '$output'" + + $opt_dry_run || echo ': @PROGRAM@ ' > "$nlist" + + # Add our own program objects to the symbol list. 
+ progfiles=`$ECHO "$objs$old_deplibs" | $SP2NL | $SED "$lo2o" | $NL2SP` + for progfile in $progfiles; do + func_to_tool_file "$progfile" func_convert_file_msys_to_w32 + func_verbose "extracting global C symbols from '$func_to_tool_file_result'" + $opt_dry_run || eval "$NM $func_to_tool_file_result | $global_symbol_pipe >> '$nlist'" + done + + if test -n "$exclude_expsyms"; then + $opt_dry_run || { + eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T' + eval '$MV "$nlist"T "$nlist"' + } + fi + + if test -n "$export_symbols_regex"; then + $opt_dry_run || { + eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T' + eval '$MV "$nlist"T "$nlist"' + } + fi + + # Prepare the list of exported symbols + if test -z "$export_symbols"; then + export_symbols=$output_objdir/$outputname.exp + $opt_dry_run || { + $RM $export_symbols + eval "$SED -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"' + case $host in + *cygwin* | *mingw* | *cegcc* ) + eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' + eval 'cat "$export_symbols" >> "$output_objdir/$outputname.def"' + ;; + esac + } + else + $opt_dry_run || { + eval "$SED -e 's/\([].[*^$]\)/\\\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"' + eval '$GREP -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T' + eval '$MV "$nlist"T "$nlist"' + case $host in + *cygwin* | *mingw* | *cegcc* ) + eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' + eval 'cat "$nlist" >> "$output_objdir/$outputname.def"' + ;; + esac + } + fi + fi + + for dlprefile in $dlprefiles; do + func_verbose "extracting global C symbols from '$dlprefile'" + func_basename "$dlprefile" + name=$func_basename_result + case $host in + *cygwin* | *mingw* | *cegcc* ) + # if an import library, we need to obtain dlname + if func_win32_import_lib_p "$dlprefile"; then + func_tr_sh "$dlprefile" + eval "curr_lafile=\$libfile_$func_tr_sh_result" + dlprefile_dlbasename= + if test -n "$curr_lafile" && func_lalib_p "$curr_lafile"; then + # Use subshell, to avoid clobbering current variable values + dlprefile_dlname=`source "$curr_lafile" && echo "$dlname"` + if test -n "$dlprefile_dlname"; then + func_basename "$dlprefile_dlname" + dlprefile_dlbasename=$func_basename_result + else + # no lafile. user explicitly requested -dlpreopen . + $sharedlib_from_linklib_cmd "$dlprefile" + dlprefile_dlbasename=$sharedlib_from_linklib_result + fi + fi + $opt_dry_run || { + if test -n "$dlprefile_dlbasename"; then + eval '$ECHO ": $dlprefile_dlbasename" >> "$nlist"' + else + func_warning "Could not compute DLL name from $name" + eval '$ECHO ": $name " >> "$nlist"' + fi + func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 + eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe | + $SED -e '/I __imp/d' -e 's/I __nm_/D /;s/_nm__//' >> '$nlist'" + } + else # not an import lib + $opt_dry_run || { + eval '$ECHO ": $name " >> "$nlist"' + func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 + eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'" + } + fi + ;; + *) + $opt_dry_run || { + eval '$ECHO ": $name " >> "$nlist"' + func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 + eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'" + } + ;; + esac + done + + $opt_dry_run || { + # Make sure we have at least an empty file. 
+ test -f "$nlist" || : > "$nlist" + + if test -n "$exclude_expsyms"; then + $EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T + $MV "$nlist"T "$nlist" + fi + + # Try sorting and uniquifying the output. + if $GREP -v "^: " < "$nlist" | + if sort -k 3 </dev/null >/dev/null 2>&1; then + sort -k 3 + else + sort +2 + fi | + uniq > "$nlist"S; then + : + else + $GREP -v "^: " < "$nlist" > "$nlist"S + fi + + if test -f "$nlist"S; then + eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$my_dlsyms"' + else + echo '/* NONE */' >> "$output_objdir/$my_dlsyms" + fi + + func_show_eval '$RM "${nlist}I"' + if test -n "$global_symbol_to_import"; then + eval "$global_symbol_to_import"' < "$nlist"S > "$nlist"I' + fi + + echo >> "$output_objdir/$my_dlsyms" "\ + +/* The mapping between symbol names and symbols. */ +typedef struct { + const char *name; + void *address; +} lt_dlsymlist; +extern LT_DLSYM_CONST lt_dlsymlist +lt_${my_prefix}_LTX_preloaded_symbols[];\ +" + + if test -s "$nlist"I; then + echo >> "$output_objdir/$my_dlsyms" "\ +static void lt_syminit(void) +{ + LT_DLSYM_CONST lt_dlsymlist *symbol = lt_${my_prefix}_LTX_preloaded_symbols; + for (; symbol->name; ++symbol) + {" + $SED 's/.*/ if (STREQ (symbol->name, \"&\")) symbol->address = (void *) \&&;/' < "$nlist"I >> "$output_objdir/$my_dlsyms" + echo >> "$output_objdir/$my_dlsyms" "\ + } +}" + fi + echo >> "$output_objdir/$my_dlsyms" "\ +LT_DLSYM_CONST lt_dlsymlist +lt_${my_prefix}_LTX_preloaded_symbols[] = +{ {\"$my_originator\", (void *) 0}," + + if test -s "$nlist"I; then + echo >> "$output_objdir/$my_dlsyms" "\ + {\"@INIT@\", (void *) &lt_syminit}," + fi + + case $need_lib_prefix in + no) + eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$my_dlsyms" + ;; + *) + eval "$global_symbol_to_c_name_address_lib_prefix" < "$nlist" >> "$output_objdir/$my_dlsyms" + ;; + esac + echo >> "$output_objdir/$my_dlsyms" "\ + {0, (void *) 0} +}; + +/* This works around a problem in FreeBSD linker */ +#ifdef FREEBSD_WORKAROUND +static const void *lt_preloaded_setup() { + return lt_${my_prefix}_LTX_preloaded_symbols; +} +#endif + +#ifdef __cplusplus +} +#endif\ +" + } # !$opt_dry_run + + pic_flag_for_symtable= + case "$compile_command " in + *" -static "*) ;; + *) + case $host in + # compiling the symbol table file with pic_flag works around + # a FreeBSD bug that causes programs to crash when -lm is + # linked before any other PIC object. But we must not use + # pic_flag when linking with -static. The problem exists in + # FreeBSD 2.2.6 and is fixed in FreeBSD 3.1. + *-*-freebsd2.*|*-*-freebsd3.0*|*-*-freebsdelf3.0*) + pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND" ;; + *-*-hpux*) + pic_flag_for_symtable=" $pic_flag" ;; + *) + $my_pic_p && pic_flag_for_symtable=" $pic_flag" + ;; + esac + ;; + esac + symtab_cflags= + for arg in $LTCFLAGS; do + case $arg in + -pie | -fpie | -fPIE) ;; + *) func_append symtab_cflags " $arg" ;; + esac + done + + # Now compile the dynamic symbol file. + func_show_eval '(cd $output_objdir && $LTCC$symtab_cflags -c$no_builtin_flag$pic_flag_for_symtable "$my_dlsyms")' 'exit $?' + + # Clean up the generated files. + func_show_eval '$RM "$output_objdir/$my_dlsyms" "$nlist" "${nlist}S" "${nlist}T" "${nlist}I"' + + # Transform the symbol file into the correct name.
+ symfileobj=$output_objdir/${my_outputname}S.$objext + case $host in + *cygwin* | *mingw* | *cegcc* ) + if test -f "$output_objdir/$my_outputname.def"; then + compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` + else + compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"` + fi + ;; + *) + compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"` + ;; + esac + ;; + *) + func_fatal_error "unknown suffix for '$my_dlsyms'" + ;; + esac + else + # We keep going just in case the user didn't refer to + # lt_preloaded_symbols. The linker will fail if global_symbol_pipe + # really was required. + + # Nullify the symbol file. + compile_command=`$ECHO "$compile_command" | $SED "s% @SYMFILE@%%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s% @SYMFILE@%%"` + fi +} + +# func_cygming_gnu_implib_p ARG +# This predicate returns with zero status (TRUE) if +# ARG is a GNU/binutils-style import library. Returns +# with nonzero status (FALSE) otherwise. +func_cygming_gnu_implib_p () +{ + $debug_cmd + + func_to_tool_file "$1" func_convert_file_msys_to_w32 + func_cygming_gnu_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $EGREP ' (_head_[A-Za-z0-9_]+_[ad]l*|[A-Za-z0-9_]+_[ad]l*_iname)$'` + test -n "$func_cygming_gnu_implib_tmp" +} + +# func_cygming_ms_implib_p ARG +# This predicate returns with zero status (TRUE) if +# ARG is an MS-style import library. Returns +# with nonzero status (FALSE) otherwise. +func_cygming_ms_implib_p () +{ + $debug_cmd + + func_to_tool_file "$1" func_convert_file_msys_to_w32 + func_cygming_ms_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $GREP '_NULL_IMPORT_DESCRIPTOR'` + test -n "$func_cygming_ms_implib_tmp" +} + +# func_win32_libid arg +# return the library type of file 'arg' +# +# Need a lot of goo to handle *both* DLLs and import libs +# Has to be a shell function in order to 'eat' the argument +# that is supplied when $file_magic_command is called. +# Despite the name, also deal with 64 bit binaries. +func_win32_libid () +{ + $debug_cmd + + win32_libid_type=unknown + win32_fileres=`file -L $1 2>/dev/null` + case $win32_fileres in + *ar\ archive\ import\ library*) # definitely import + win32_libid_type="x86 archive import" + ;; + *ar\ archive*) # could be an import, or static + # Keep the egrep pattern in sync with the one in _LT_CHECK_MAGIC_METHOD. + if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null | + $EGREP 'file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' >/dev/null; then + case $nm_interface in + "MS dumpbin") + if func_cygming_ms_implib_p "$1" || + func_cygming_gnu_implib_p "$1" + then + win32_nmres=import + else + win32_nmres= + fi + ;; + *) + func_to_tool_file "$1" func_convert_file_msys_to_w32 + win32_nmres=`eval $NM -f posix -A \"$func_to_tool_file_result\" | + $SED -n -e ' + 1,100{ + / I /{ + s|.*|import| + p + q + } + }'` + ;; + esac + case $win32_nmres in + import*) win32_libid_type="x86 archive import";; + *) win32_libid_type="x86 archive static";; + esac + fi + ;; + *DLL*) + win32_libid_type="x86 DLL" + ;; + *executable*) # but shell scripts are "executable" too... 
+ case $win32_fileres in + *MS\ Windows\ PE\ Intel*) + win32_libid_type="x86 DLL" + ;; + esac + ;; + esac + $ECHO "$win32_libid_type" +} + +# func_cygming_dll_for_implib ARG +# +# Platform-specific function to extract the +# name of the DLL associated with the specified +# import library ARG. +# Invoked by eval'ing the libtool variable +# $sharedlib_from_linklib_cmd +# Result is available in the variable +# $sharedlib_from_linklib_result +func_cygming_dll_for_implib () +{ + $debug_cmd + + sharedlib_from_linklib_result=`$DLLTOOL --identify-strict --identify "$1"` +} + +# func_cygming_dll_for_implib_fallback_core SECTION_NAME LIBNAMEs +# +# The is the core of a fallback implementation of a +# platform-specific function to extract the name of the +# DLL associated with the specified import library LIBNAME. +# +# SECTION_NAME is either .idata$6 or .idata$7, depending +# on the platform and compiler that created the implib. +# +# Echos the name of the DLL associated with the +# specified import library. +func_cygming_dll_for_implib_fallback_core () +{ + $debug_cmd + + match_literal=`$ECHO "$1" | $SED "$sed_make_literal_regex"` + $OBJDUMP -s --section "$1" "$2" 2>/dev/null | + $SED '/^Contents of section '"$match_literal"':/{ + # Place marker at beginning of archive member dllname section + s/.*/====MARK====/ + p + d + } + # These lines can sometimes be longer than 43 characters, but + # are always uninteresting + /:[ ]*file format pe[i]\{,1\}-/d + /^In archive [^:]*:/d + # Ensure marker is printed + /^====MARK====/p + # Remove all lines with less than 43 characters + /^.\{43\}/!d + # From remaining lines, remove first 43 characters + s/^.\{43\}//' | + $SED -n ' + # Join marker and all lines until next marker into a single line + /^====MARK====/ b para + H + $ b para + b + :para + x + s/\n//g + # Remove the marker + s/^====MARK====// + # Remove trailing dots and whitespace + s/[\. \t]*$// + # Print + /./p' | + # we now have a list, one entry per line, of the stringified + # contents of the appropriate section of all members of the + # archive that possess that section. Heuristic: eliminate + # all those that have a first or second character that is + # a '.' (that is, objdump's representation of an unprintable + # character.) This should work for all archives with less than + # 0x302f exports -- but will fail for DLLs whose name actually + # begins with a literal '.' or a single character followed by + # a '.'. + # + # Of those that remain, print the first one. + $SED -e '/^\./d;/^.\./d;q' +} + +# func_cygming_dll_for_implib_fallback ARG +# Platform-specific function to extract the +# name of the DLL associated with the specified +# import library ARG. +# +# This fallback implementation is for use when $DLLTOOL +# does not support the --identify-strict option. 
+# Invoked by eval'ing the libtool variable +# $sharedlib_from_linklib_cmd +# Result is available in the variable +# $sharedlib_from_linklib_result +func_cygming_dll_for_implib_fallback () +{ + $debug_cmd + + if func_cygming_gnu_implib_p "$1"; then + # binutils import library + sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$7' "$1"` + elif func_cygming_ms_implib_p "$1"; then + # ms-generated import library + sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$6' "$1"` + else + # unknown + sharedlib_from_linklib_result= + fi +} + + +# func_extract_an_archive dir oldlib +func_extract_an_archive () +{ + $debug_cmd + + f_ex_an_ar_dir=$1; shift + f_ex_an_ar_oldlib=$1 + if test yes = "$lock_old_archive_extraction"; then + lockfile=$f_ex_an_ar_oldlib.lock + until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do + func_echo "Waiting for $lockfile to be removed" + sleep 2 + done + fi + func_show_eval "(cd \$f_ex_an_ar_dir && $AR x \"\$f_ex_an_ar_oldlib\")" \ + 'stat=$?; rm -f "$lockfile"; exit $stat' + if test yes = "$lock_old_archive_extraction"; then + $opt_dry_run || rm -f "$lockfile" + fi + if ($AR t "$f_ex_an_ar_oldlib" | sort | sort -uc >/dev/null 2>&1); then + : + else + func_fatal_error "object name conflicts in archive: $f_ex_an_ar_dir/$f_ex_an_ar_oldlib" + fi +} + + +# func_extract_archives gentop oldlib ... +func_extract_archives () +{ + $debug_cmd + + my_gentop=$1; shift + my_oldlibs=${1+"$@"} + my_oldobjs= + my_xlib= + my_xabs= + my_xdir= + + for my_xlib in $my_oldlibs; do + # Extract the objects. + case $my_xlib in + [\\/]* | [A-Za-z]:[\\/]*) my_xabs=$my_xlib ;; + *) my_xabs=`pwd`"/$my_xlib" ;; + esac + func_basename "$my_xlib" + my_xlib=$func_basename_result + my_xlib_u=$my_xlib + while :; do + case " $extracted_archives " in + *" $my_xlib_u "*) + func_arith $extracted_serial + 1 + extracted_serial=$func_arith_result + my_xlib_u=lt$extracted_serial-$my_xlib ;; + *) break ;; + esac + done + extracted_archives="$extracted_archives $my_xlib_u" + my_xdir=$my_gentop/$my_xlib_u + + func_mkdir_p "$my_xdir" + + case $host in + *-darwin*) + func_verbose "Extracting $my_xabs" + # Do not bother doing anything if just a dry run + $opt_dry_run || { + darwin_orig_dir=`pwd` + cd $my_xdir || exit $? 
+ darwin_archive=$my_xabs + darwin_curdir=`pwd` + func_basename "$darwin_archive" + darwin_base_archive=$func_basename_result + darwin_arches=`$LIPO -info "$darwin_archive" 2>/dev/null | $GREP Architectures 2>/dev/null || true` + if test -n "$darwin_arches"; then + darwin_arches=`$ECHO "$darwin_arches" | $SED -e 's/.*are://'` + darwin_arch= + func_verbose "$darwin_base_archive has multiple architectures $darwin_arches" + for darwin_arch in $darwin_arches; do + func_mkdir_p "unfat-$$/$darwin_base_archive-$darwin_arch" + $LIPO -thin $darwin_arch -output "unfat-$$/$darwin_base_archive-$darwin_arch/$darwin_base_archive" "$darwin_archive" + cd "unfat-$$/$darwin_base_archive-$darwin_arch" + func_extract_an_archive "`pwd`" "$darwin_base_archive" + cd "$darwin_curdir" + $RM "unfat-$$/$darwin_base_archive-$darwin_arch/$darwin_base_archive" + done # $darwin_arches + ## Okay now we've a bunch of thin objects, gotta fatten them up :) + darwin_filelist=`find unfat-$$ -type f -name \*.o -print -o -name \*.lo -print | $SED -e "$sed_basename" | sort -u` + darwin_file= + darwin_files= + for darwin_file in $darwin_filelist; do + darwin_files=`find unfat-$$ -name $darwin_file -print | sort | $NL2SP` + $LIPO -create -output "$darwin_file" $darwin_files + done # $darwin_filelist + $RM -rf unfat-$$ + cd "$darwin_orig_dir" + else + cd $darwin_orig_dir + func_extract_an_archive "$my_xdir" "$my_xabs" + fi # $darwin_arches + } # !$opt_dry_run + ;; + *) + func_extract_an_archive "$my_xdir" "$my_xabs" + ;; + esac + my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | sort | $NL2SP` + done + + func_extract_archives_result=$my_oldobjs +} + + +# func_emit_wrapper [arg=no] +# +# Emit a libtool wrapper script on stdout. +# Don't directly open a file because we may want to +# incorporate the script contents within a cygwin/mingw +# wrapper executable. Must ONLY be called from within +# func_mode_link because it depends on a number of variables +# set therein. +# +# ARG is the value that the WRAPPER_SCRIPT_BELONGS_IN_OBJDIR +# variable will take. If 'yes', then the emitted script +# will assume that the directory where it is stored is +# the $objdir directory. This is a cygwin/mingw-specific +# behavior. +func_emit_wrapper () +{ + func_emit_wrapper_arg1=${1-no} + + $ECHO "\ +#! $SHELL + +# $output - temporary wrapper script for $objdir/$outputname +# Generated by $PROGRAM (GNU $PACKAGE) $VERSION +# +# The $output program cannot be directly executed until all the libtool +# libraries that it depends on are installed. +# +# This wrapper script should never be moved out of the build directory. +# If it is, it will not operate correctly. + +# Sed substitution that helps us do robust quoting. It backslashifies +# metacharacters that are still active within double-quoted strings. +sed_quote_subst='$sed_quote_subst' + +# Be Bourne compatible +if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Zsh 3.x and 4.x performs word splitting on \${1+\"\$@\"}, which + # is contrary to our usage. Disable this feature. + alias -g '\${1+\"\$@\"}'='\"\$@\"' + setopt NO_GLOB_SUBST +else + case \`(set -o) 2>/dev/null\` in *posix*) set -o posix;; esac +fi +BIN_SH=xpg4; export BIN_SH # for Tru64 +DUALCASE=1; export DUALCASE # for MKS sh + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. 
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +relink_command=\"$relink_command\" + +# This environment variable determines our operation mode. +if test \"\$libtool_install_magic\" = \"$magic\"; then + # install mode needs the following variables: + generated_by_libtool_version='$macro_version' + notinst_deplibs='$notinst_deplibs' +else + # When we are sourced in execute mode, \$file and \$ECHO are already set. + if test \"\$libtool_execute_magic\" != \"$magic\"; then + file=\"\$0\"" + + func_quote_arg pretty "$ECHO" + qECHO=$func_quote_arg_result + $ECHO "\ + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +\$1 +_LTECHO_EOF' +} + ECHO=$qECHO + fi + +# Very basic option parsing. These options are (a) specific to +# the libtool wrapper, (b) are identical between the wrapper +# /script/ and the wrapper /executable/ that is used only on +# windows platforms, and (c) all begin with the string "--lt-" +# (application programs are unlikely to have options that match +# this pattern). +# +# There are only two supported options: --lt-debug and +# --lt-dump-script. There is, deliberately, no --lt-help. +# +# The first argument to this parsing function should be the +# script's $0 value, followed by "$@". +lt_option_debug= +func_parse_lt_options () +{ + lt_script_arg0=\$0 + shift + for lt_opt + do + case \"\$lt_opt\" in + --lt-debug) lt_option_debug=1 ;; + --lt-dump-script) + lt_dump_D=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%/[^/]*$%%'\` + test \"X\$lt_dump_D\" = \"X\$lt_script_arg0\" && lt_dump_D=. + lt_dump_F=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%^.*/%%'\` + cat \"\$lt_dump_D/\$lt_dump_F\" + exit 0 + ;; + --lt-*) + \$ECHO \"Unrecognized --lt- option: '\$lt_opt'\" 1>&2 + exit 1 + ;; + esac + done + + # Print the debug banner immediately: + if test -n \"\$lt_option_debug\"; then + echo \"$outputname:$output:\$LINENO: libtool wrapper (GNU $PACKAGE) $VERSION\" 1>&2 + fi +} + +# Used when --lt-debug. Prints its arguments to stdout +# (redirection is the responsibility of the caller) +func_lt_dump_args () +{ + lt_dump_args_N=1; + for lt_arg + do + \$ECHO \"$outputname:$output:\$LINENO: newargv[\$lt_dump_args_N]: \$lt_arg\" + lt_dump_args_N=\`expr \$lt_dump_args_N + 1\` + done +} + +# Core function for launching the target application +func_exec_program_core () +{ +" + case $host in + # Backslashes separate directories on plain windows + *-*-mingw | *-*-os2* | *-cegcc*) + $ECHO "\ + if test -n \"\$lt_option_debug\"; then + \$ECHO \"$outputname:$output:\$LINENO: newargv[0]: \$progdir\\\\\$program\" 1>&2 + func_lt_dump_args \${1+\"\$@\"} 1>&2 + fi + exec \"\$progdir\\\\\$program\" \${1+\"\$@\"} +" + ;; + + *) + $ECHO "\ + if test -n \"\$lt_option_debug\"; then + \$ECHO \"$outputname:$output:\$LINENO: newargv[0]: \$progdir/\$program\" 1>&2 + func_lt_dump_args \${1+\"\$@\"} 1>&2 + fi + exec \"\$progdir/\$program\" \${1+\"\$@\"} +" + ;; + esac + $ECHO "\ + \$ECHO \"\$0: cannot exec \$program \$*\" 1>&2 + exit 1 +} + +# A function to encapsulate launching the target application +# Strips options in the --lt-* namespace from \$@ and +# launches target application with the remaining arguments. 
+func_exec_program () +{ + case \" \$* \" in + *\\ --lt-*) + for lt_wr_arg + do + case \$lt_wr_arg in + --lt-*) ;; + *) set x \"\$@\" \"\$lt_wr_arg\"; shift;; + esac + shift + done ;; + esac + func_exec_program_core \${1+\"\$@\"} +} + + # Parse options + func_parse_lt_options \"\$0\" \${1+\"\$@\"} + + # Find the directory that this script lives in. + thisdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*$%%'\` + test \"x\$thisdir\" = \"x\$file\" && thisdir=. + + # Follow symbolic links until we get to the real thisdir. + file=\`ls -ld \"\$file\" | $SED -n 's/.*-> //p'\` + while test -n \"\$file\"; do + destdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*\$%%'\` + + # If there was a directory component, then change thisdir. + if test \"x\$destdir\" != \"x\$file\"; then + case \"\$destdir\" in + [\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;; + *) thisdir=\"\$thisdir/\$destdir\" ;; + esac + fi + + file=\`\$ECHO \"\$file\" | $SED 's%^.*/%%'\` + file=\`ls -ld \"\$thisdir/\$file\" | $SED -n 's/.*-> //p'\` + done + + # Usually 'no', except on cygwin/mingw when embedded into + # the cwrapper. + WRAPPER_SCRIPT_BELONGS_IN_OBJDIR=$func_emit_wrapper_arg1 + if test \"\$WRAPPER_SCRIPT_BELONGS_IN_OBJDIR\" = \"yes\"; then + # special case for '.' + if test \"\$thisdir\" = \".\"; then + thisdir=\`pwd\` + fi + # remove .libs from thisdir + case \"\$thisdir\" in + *[\\\\/]$objdir ) thisdir=\`\$ECHO \"\$thisdir\" | $SED 's%[\\\\/][^\\\\/]*$%%'\` ;; + $objdir ) thisdir=. ;; + esac + fi + + # Try to get the absolute directory name. + absdir=\`cd \"\$thisdir\" && pwd\` + test -n \"\$absdir\" && thisdir=\"\$absdir\" +" + + if test yes = "$fast_install"; then + $ECHO "\ + program=lt-'$outputname'$exeext + progdir=\"\$thisdir/$objdir\" + + if test ! -f \"\$progdir/\$program\" || + { file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | $SED 1q\`; \\ + test \"X\$file\" != \"X\$progdir/\$program\"; }; then + + file=\"\$\$-\$program\" + + if test ! -d \"\$progdir\"; then + $MKDIR \"\$progdir\" + else + $RM \"\$progdir/\$file\" + fi" + + $ECHO "\ + + # relink executable if necessary + if test -n \"\$relink_command\"; then + if relink_command_output=\`eval \$relink_command 2>&1\`; then : + else + \$ECHO \"\$relink_command_output\" >&2 + $RM \"\$progdir/\$file\" + exit 1 + fi + fi + + $MV \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null || + { $RM \"\$progdir/\$program\"; + $MV \"\$progdir/\$file\" \"\$progdir/\$program\"; } + $RM \"\$progdir/\$file\" + fi" + else + $ECHO "\ + program='$outputname' + progdir=\"\$thisdir/$objdir\" +" + fi + + $ECHO "\ + + if test -f \"\$progdir/\$program\"; then" + + # fixup the dll searchpath if we need to. + # + # Fix the DLL searchpath if we need to. Do this before prepending + # to shlibpath, because on Windows, both are PATH and uninstalled + # libraries must come first. + if test -n "$dllsearchpath"; then + $ECHO "\ + # Add the dll search path components to the executable PATH + PATH=$dllsearchpath:\$PATH +" + fi + + # Export our shlibpath_var if we have one. 
+ if test yes = "$shlibpath_overrides_runpath" && test -n "$shlibpath_var" && test -n "$temp_rpath"; then + $ECHO "\ + # Add our own library path to $shlibpath_var + $shlibpath_var=\"$temp_rpath\$$shlibpath_var\" + + # Some systems cannot cope with colon-terminated $shlibpath_var + # The second colon is a workaround for a bug in BeOS R4 sed + $shlibpath_var=\`\$ECHO \"\$$shlibpath_var\" | $SED 's/::*\$//'\` + + export $shlibpath_var +" + fi + + $ECHO "\ + if test \"\$libtool_execute_magic\" != \"$magic\"; then + # Run the actual program with our arguments. + func_exec_program \${1+\"\$@\"} + fi + else + # The program doesn't exist. + \$ECHO \"\$0: error: '\$progdir/\$program' does not exist\" 1>&2 + \$ECHO \"This script is just a wrapper for \$program.\" 1>&2 + \$ECHO \"See the $PACKAGE documentation for more information.\" 1>&2 + exit 1 + fi +fi\ +" +} + + +# func_emit_cwrapperexe_src +# emit the source code for a wrapper executable on stdout +# Must ONLY be called from within func_mode_link because +# it depends on a number of variable set therein. +func_emit_cwrapperexe_src () +{ + cat < +#include +#ifdef _MSC_VER +# include +# include +# include +#else +# include +# include +# ifdef __CYGWIN__ +# include +# endif +#endif +#include +#include +#include +#include +#include +#include +#include +#include + +#define STREQ(s1, s2) (strcmp ((s1), (s2)) == 0) + +/* declarations of non-ANSI functions */ +#if defined __MINGW32__ +# ifdef __STRICT_ANSI__ +int _putenv (const char *); +# endif +#elif defined __CYGWIN__ +# ifdef __STRICT_ANSI__ +char *realpath (const char *, char *); +int putenv (char *); +int setenv (const char *, const char *, int); +# endif +/* #elif defined other_platform || defined ... */ +#endif + +/* portability defines, excluding path handling macros */ +#if defined _MSC_VER +# define setmode _setmode +# define stat _stat +# define chmod _chmod +# define getcwd _getcwd +# define putenv _putenv +# define S_IXUSR _S_IEXEC +#elif defined __MINGW32__ +# define setmode _setmode +# define stat _stat +# define chmod _chmod +# define getcwd _getcwd +# define putenv _putenv +#elif defined __CYGWIN__ +# define HAVE_SETENV +# define FOPEN_WB "wb" +/* #elif defined other platforms ... 
*/ +#endif + +#if defined PATH_MAX +# define LT_PATHMAX PATH_MAX +#elif defined MAXPATHLEN +# define LT_PATHMAX MAXPATHLEN +#else +# define LT_PATHMAX 1024 +#endif + +#ifndef S_IXOTH +# define S_IXOTH 0 +#endif +#ifndef S_IXGRP +# define S_IXGRP 0 +#endif + +/* path handling portability macros */ +#ifndef DIR_SEPARATOR +# define DIR_SEPARATOR '/' +# define PATH_SEPARATOR ':' +#endif + +#if defined _WIN32 || defined __MSDOS__ || defined __DJGPP__ || \ + defined __OS2__ +# define HAVE_DOS_BASED_FILE_SYSTEM +# define FOPEN_WB "wb" +# ifndef DIR_SEPARATOR_2 +# define DIR_SEPARATOR_2 '\\' +# endif +# ifndef PATH_SEPARATOR_2 +# define PATH_SEPARATOR_2 ';' +# endif +#endif + +#ifndef DIR_SEPARATOR_2 +# define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR) +#else /* DIR_SEPARATOR_2 */ +# define IS_DIR_SEPARATOR(ch) \ + (((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2)) +#endif /* DIR_SEPARATOR_2 */ + +#ifndef PATH_SEPARATOR_2 +# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR) +#else /* PATH_SEPARATOR_2 */ +# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR_2) +#endif /* PATH_SEPARATOR_2 */ + +#ifndef FOPEN_WB +# define FOPEN_WB "w" +#endif +#ifndef _O_BINARY +# define _O_BINARY 0 +#endif + +#define XMALLOC(type, num) ((type *) xmalloc ((num) * sizeof(type))) +#define XFREE(stale) do { \ + if (stale) { free (stale); stale = 0; } \ +} while (0) + +#if defined LT_DEBUGWRAPPER +static int lt_debug = 1; +#else +static int lt_debug = 0; +#endif + +const char *program_name = "libtool-wrapper"; /* in case xstrdup fails */ + +void *xmalloc (size_t num); +char *xstrdup (const char *string); +const char *base_name (const char *name); +char *find_executable (const char *wrapper); +char *chase_symlinks (const char *pathspec); +int make_executable (const char *path); +int check_executable (const char *path); +char *strendzap (char *str, const char *pat); +void lt_debugprintf (const char *file, int line, const char *fmt, ...); +void lt_fatal (const char *file, int line, const char *message, ...); +static const char *nonnull (const char *s); +static const char *nonempty (const char *s); +void lt_setenv (const char *name, const char *value); +char *lt_extend_str (const char *orig_value, const char *add, int to_end); +void lt_update_exe_path (const char *name, const char *value); +void lt_update_lib_path (const char *name, const char *value); +char **prepare_spawn (char **argv); +void lt_dump_script (FILE *f); +EOF + + cat <= 0) + && (st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) + return 1; + else + return 0; +} + +int +make_executable (const char *path) +{ + int rval = 0; + struct stat st; + + lt_debugprintf (__FILE__, __LINE__, "(make_executable): %s\n", + nonempty (path)); + if ((!path) || (!*path)) + return 0; + + if (stat (path, &st) >= 0) + { + rval = chmod (path, st.st_mode | S_IXOTH | S_IXGRP | S_IXUSR); + } + return rval; +} + +/* Searches for the full path of the wrapper. Returns + newly allocated full path name if found, NULL otherwise + Does not chase symlinks, even on platforms that support them. +*/ +char * +find_executable (const char *wrapper) +{ + int has_slash = 0; + const char *p; + const char *p_next; + /* static buffer for getcwd */ + char tmp[LT_PATHMAX + 1]; + size_t tmp_len; + char *concat_name; + + lt_debugprintf (__FILE__, __LINE__, "(find_executable): %s\n", + nonempty (wrapper)); + + if ((wrapper == NULL) || (*wrapper == '\0')) + return NULL; + + /* Absolute path? 
*/ +#if defined HAVE_DOS_BASED_FILE_SYSTEM + if (isalpha ((unsigned char) wrapper[0]) && wrapper[1] == ':') + { + concat_name = xstrdup (wrapper); + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } + else + { +#endif + if (IS_DIR_SEPARATOR (wrapper[0])) + { + concat_name = xstrdup (wrapper); + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } +#if defined HAVE_DOS_BASED_FILE_SYSTEM + } +#endif + + for (p = wrapper; *p; p++) + if (*p == '/') + { + has_slash = 1; + break; + } + if (!has_slash) + { + /* no slashes; search PATH */ + const char *path = getenv ("PATH"); + if (path != NULL) + { + for (p = path; *p; p = p_next) + { + const char *q; + size_t p_len; + for (q = p; *q; q++) + if (IS_PATH_SEPARATOR (*q)) + break; + p_len = (size_t) (q - p); + p_next = (*q == '\0' ? q : q + 1); + if (p_len == 0) + { + /* empty path: current directory */ + if (getcwd (tmp, LT_PATHMAX) == NULL) + lt_fatal (__FILE__, __LINE__, "getcwd failed: %s", + nonnull (strerror (errno))); + tmp_len = strlen (tmp); + concat_name = + XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, tmp, tmp_len); + concat_name[tmp_len] = '/'; + strcpy (concat_name + tmp_len + 1, wrapper); + } + else + { + concat_name = + XMALLOC (char, p_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, p, p_len); + concat_name[p_len] = '/'; + strcpy (concat_name + p_len + 1, wrapper); + } + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } + } + /* not found in PATH; assume curdir */ + } + /* Relative path | not found in path: prepend cwd */ + if (getcwd (tmp, LT_PATHMAX) == NULL) + lt_fatal (__FILE__, __LINE__, "getcwd failed: %s", + nonnull (strerror (errno))); + tmp_len = strlen (tmp); + concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, tmp, tmp_len); + concat_name[tmp_len] = '/'; + strcpy (concat_name + tmp_len + 1, wrapper); + + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + return NULL; +} + +char * +chase_symlinks (const char *pathspec) +{ +#ifndef S_ISLNK + return xstrdup (pathspec); +#else + char buf[LT_PATHMAX]; + struct stat s; + char *tmp_pathspec = xstrdup (pathspec); + char *p; + int has_symlinks = 0; + while (strlen (tmp_pathspec) && !has_symlinks) + { + lt_debugprintf (__FILE__, __LINE__, + "checking path component for symlinks: %s\n", + tmp_pathspec); + if (lstat (tmp_pathspec, &s) == 0) + { + if (S_ISLNK (s.st_mode) != 0) + { + has_symlinks = 1; + break; + } + + /* search backwards for last DIR_SEPARATOR */ + p = tmp_pathspec + strlen (tmp_pathspec) - 1; + while ((p > tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) + p--; + if ((p == tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) + { + /* no more DIR_SEPARATORS left */ + break; + } + *p = '\0'; + } + else + { + lt_fatal (__FILE__, __LINE__, + "error accessing file \"%s\": %s", + tmp_pathspec, nonnull (strerror (errno))); + } + } + XFREE (tmp_pathspec); + + if (!has_symlinks) + { + return xstrdup (pathspec); + } + + tmp_pathspec = realpath (pathspec, buf); + if (tmp_pathspec == 0) + { + lt_fatal (__FILE__, __LINE__, + "could not follow symlinks for %s", pathspec); + } + return xstrdup (tmp_pathspec); +#endif +} + +char * +strendzap (char *str, const char *pat) +{ + size_t len, patlen; + + assert (str != NULL); + assert (pat != NULL); + + len = strlen (str); + patlen = strlen (pat); + + if (patlen <= len) + { + str += len - patlen; + if (STREQ (str, pat)) + *str = '\0'; + } + return 
str; +} + +void +lt_debugprintf (const char *file, int line, const char *fmt, ...) +{ + va_list args; + if (lt_debug) + { + (void) fprintf (stderr, "%s:%s:%d: ", program_name, file, line); + va_start (args, fmt); + (void) vfprintf (stderr, fmt, args); + va_end (args); + } +} + +static void +lt_error_core (int exit_status, const char *file, + int line, const char *mode, + const char *message, va_list ap) +{ + fprintf (stderr, "%s:%s:%d: %s: ", program_name, file, line, mode); + vfprintf (stderr, message, ap); + fprintf (stderr, ".\n"); + + if (exit_status >= 0) + exit (exit_status); +} + +void +lt_fatal (const char *file, int line, const char *message, ...) +{ + va_list ap; + va_start (ap, message); + lt_error_core (EXIT_FAILURE, file, line, "FATAL", message, ap); + va_end (ap); +} + +static const char * +nonnull (const char *s) +{ + return s ? s : "(null)"; +} + +static const char * +nonempty (const char *s) +{ + return (s && !*s) ? "(empty)" : nonnull (s); +} + +void +lt_setenv (const char *name, const char *value) +{ + lt_debugprintf (__FILE__, __LINE__, + "(lt_setenv) setting '%s' to '%s'\n", + nonnull (name), nonnull (value)); + { +#ifdef HAVE_SETENV + /* always make a copy, for consistency with !HAVE_SETENV */ + char *str = xstrdup (value); + setenv (name, str, 1); +#else + size_t len = strlen (name) + 1 + strlen (value) + 1; + char *str = XMALLOC (char, len); + sprintf (str, "%s=%s", name, value); + if (putenv (str) != EXIT_SUCCESS) + { + XFREE (str); + } +#endif + } +} + +char * +lt_extend_str (const char *orig_value, const char *add, int to_end) +{ + char *new_value; + if (orig_value && *orig_value) + { + size_t orig_value_len = strlen (orig_value); + size_t add_len = strlen (add); + new_value = XMALLOC (char, add_len + orig_value_len + 1); + if (to_end) + { + strcpy (new_value, orig_value); + strcpy (new_value + orig_value_len, add); + } + else + { + strcpy (new_value, add); + strcpy (new_value + add_len, orig_value); + } + } + else + { + new_value = xstrdup (add); + } + return new_value; +} + +void +lt_update_exe_path (const char *name, const char *value) +{ + lt_debugprintf (__FILE__, __LINE__, + "(lt_update_exe_path) modifying '%s' by prepending '%s'\n", + nonnull (name), nonnull (value)); + + if (name && *name && value && *value) + { + char *new_value = lt_extend_str (getenv (name), value, 0); + /* some systems can't cope with a ':'-terminated path #' */ + size_t len = strlen (new_value); + while ((len > 0) && IS_PATH_SEPARATOR (new_value[len-1])) + { + new_value[--len] = '\0'; + } + lt_setenv (name, new_value); + XFREE (new_value); + } +} + +void +lt_update_lib_path (const char *name, const char *value) +{ + lt_debugprintf (__FILE__, __LINE__, + "(lt_update_lib_path) modifying '%s' by prepending '%s'\n", + nonnull (name), nonnull (value)); + + if (name && *name && value && *value) + { + char *new_value = lt_extend_str (getenv (name), value, 0); + lt_setenv (name, new_value); + XFREE (new_value); + } +} + +EOF + case $host_os in + mingw*) + cat <<"EOF" + +/* Prepares an argument vector before calling spawn(). + Note that spawn() does not by itself call the command interpreter + (getenv ("COMSPEC") != NULL ? getenv ("COMSPEC") : + ({ OSVERSIONINFO v; v.dwOSVersionInfoSize = sizeof(OSVERSIONINFO); + GetVersionEx(&v); + v.dwPlatformId == VER_PLATFORM_WIN32_NT; + }) ? "cmd.exe" : "command.com"). + Instead it simply concatenates the arguments, separated by ' ', and calls + CreateProcess(). 
We must quote the arguments since Win32 CreateProcess() + interprets characters like ' ', '\t', '\\', '"' (but not '<' and '>') in a + special way: + - Space and tab are interpreted as delimiters. They are not treated as + delimiters if they are surrounded by double quotes: "...". + - Unescaped double quotes are removed from the input. Their only effect is + that within double quotes, space and tab are treated like normal + characters. + - Backslashes not followed by double quotes are not special. + - But 2*n+1 backslashes followed by a double quote become + n backslashes followed by a double quote (n >= 0): + \" -> " + \\\" -> \" + \\\\\" -> \\" + */ +#define SHELL_SPECIAL_CHARS "\"\\ \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037" +#define SHELL_SPACE_CHARS " \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037" +char ** +prepare_spawn (char **argv) +{ + size_t argc; + char **new_argv; + size_t i; + + /* Count number of arguments. */ + for (argc = 0; argv[argc] != NULL; argc++) + ; + + /* Allocate new argument vector. */ + new_argv = XMALLOC (char *, argc + 1); + + /* Put quoted arguments into the new argument vector. */ + for (i = 0; i < argc; i++) + { + const char *string = argv[i]; + + if (string[0] == '\0') + new_argv[i] = xstrdup ("\"\""); + else if (strpbrk (string, SHELL_SPECIAL_CHARS) != NULL) + { + int quote_around = (strpbrk (string, SHELL_SPACE_CHARS) != NULL); + size_t length; + unsigned int backslashes; + const char *s; + char *quoted_string; + char *p; + + length = 0; + backslashes = 0; + if (quote_around) + length++; + for (s = string; *s != '\0'; s++) + { + char c = *s; + if (c == '"') + length += backslashes + 1; + length++; + if (c == '\\') + backslashes++; + else + backslashes = 0; + } + if (quote_around) + length += backslashes + 1; + + quoted_string = XMALLOC (char, length + 1); + + p = quoted_string; + backslashes = 0; + if (quote_around) + *p++ = '"'; + for (s = string; *s != '\0'; s++) + { + char c = *s; + if (c == '"') + { + unsigned int j; + for (j = backslashes + 1; j > 0; j--) + *p++ = '\\'; + } + *p++ = c; + if (c == '\\') + backslashes++; + else + backslashes = 0; + } + if (quote_around) + { + unsigned int j; + for (j = backslashes; j > 0; j--) + *p++ = '\\'; + *p++ = '"'; + } + *p = '\0'; + + new_argv[i] = quoted_string; + } + else + new_argv[i] = (char *) string; + } + new_argv[argc] = NULL; + + return new_argv; +} +EOF + ;; + esac + + cat <<"EOF" +void lt_dump_script (FILE* f) +{ +EOF + func_emit_wrapper yes | + $SED -n -e ' +s/^\(.\{79\}\)\(..*\)/\1\ +\2/ +h +s/\([\\"]\)/\\\1/g +s/$/\\n/ +s/\([^\n]*\).*/ fputs ("\1", f);/p +g +D' + cat <<"EOF" +} +EOF +} +# end: func_emit_cwrapperexe_src + +# func_win32_import_lib_p ARG +# True if ARG is an import lib, as indicated by $file_magic_cmd +func_win32_import_lib_p () +{ + $debug_cmd + + case `eval $file_magic_cmd \"\$1\" 2>/dev/null | $SED -e 10q` in + *import*) : ;; + *) false ;; + esac +} + +# func_suncc_cstd_abi +# !!ONLY CALL THIS FOR SUN CC AFTER $compile_command IS FULLY EXPANDED!! +# Several compiler flags select an ABI that is incompatible with the +# Cstd library. Avoid specifying it if any are in CXXFLAGS. 
+func_suncc_cstd_abi () +{ + $debug_cmd + + case " $compile_command " in + *" -compat=g "*|*\ -std=c++[0-9][0-9]\ *|*" -library=stdcxx4 "*|*" -library=stlport4 "*) + suncc_use_cstd_abi=no + ;; + *) + suncc_use_cstd_abi=yes + ;; + esac +} + +# func_mode_link arg... +func_mode_link () +{ + $debug_cmd + + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + # It is impossible to link a dll without this setting, and + # we shouldn't force the makefile maintainer to figure out + # what system we are compiling for in order to pass an extra + # flag for every libtool invocation. + # allow_undefined=no + + # FIXME: Unfortunately, there are problems with the above when trying + # to make a dll that has undefined symbols, in which case not + # even a static library is built. For now, we need to specify + # -no-undefined on the libtool link line when we can be certain + # that all symbols are satisfied, otherwise we get a static library. + allow_undefined=yes + ;; + *) + allow_undefined=yes + ;; + esac + libtool_args=$nonopt + base_compile="$nonopt $@" + compile_command=$nonopt + finalize_command=$nonopt + + compile_rpath= + finalize_rpath= + compile_shlibpath= + finalize_shlibpath= + convenience= + old_convenience= + deplibs= + old_deplibs= + compiler_flags= + linker_flags= + dllsearchpath= + lib_search_path=`pwd` + inst_prefix_dir= + new_inherited_linker_flags= + + avoid_version=no + bindir= + dlfiles= + dlprefiles= + dlself=no + export_dynamic=no + export_symbols= + export_symbols_regex= + generated= + libobjs= + ltlibs= + module=no + no_install=no + objs= + os2dllname= + non_pic_objects= + precious_files_regex= + prefer_static_libs=no + preload=false + prev= + prevarg= + release= + rpath= + xrpath= + perm_rpath= + temp_rpath= + thread_safe=no + vinfo= + vinfo_number=no + weak_libs= + single_module=$wl-single_module + func_infer_tag $base_compile + + # We need to know -static, to get the right output filenames. + for arg + do + case $arg in + -shared) + test yes != "$build_libtool_libs" \ + && func_fatal_configuration "cannot build a shared library" + build_old_libs=no + break + ;; + -all-static | -static | -static-libtool-libs) + case $arg in + -all-static) + if test yes = "$build_libtool_libs" && test -z "$link_static_flag"; then + func_warning "complete static linking is impossible in this configuration" + fi + if test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=yes + ;; + -static) + if test -z "$pic_flag" && test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=built + ;; + -static-libtool-libs) + if test -z "$pic_flag" && test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=yes + ;; + esac + build_libtool_libs=no + build_old_libs=yes + break + ;; + esac + done + + # See if our shared archives depend on static archives. + test -n "$old_archive_from_new_cmds" && build_old_libs=yes + + # Go through the arguments, transforming them on the way. + while test "$#" -gt 0; do + arg=$1 + shift + func_quote_arg pretty,unquoted "$arg" + qarg=$func_quote_arg_unquoted_result + func_append libtool_args " $func_quote_arg_result" + + # If the previous option needs an argument, assign it. 
+ if test -n "$prev"; then + case $prev in + output) + func_append compile_command " @OUTPUT@" + func_append finalize_command " @OUTPUT@" + ;; + esac + + case $prev in + bindir) + bindir=$arg + prev= + continue + ;; + dlfiles|dlprefiles) + $preload || { + # Add the symbol object into the linking commands. + func_append compile_command " @SYMFILE@" + func_append finalize_command " @SYMFILE@" + preload=: + } + case $arg in + *.la | *.lo) ;; # We handle these cases below. + force) + if test no = "$dlself"; then + dlself=needless + export_dynamic=yes + fi + prev= + continue + ;; + self) + if test dlprefiles = "$prev"; then + dlself=yes + elif test dlfiles = "$prev" && test yes != "$dlopen_self"; then + dlself=yes + else + dlself=needless + export_dynamic=yes + fi + prev= + continue + ;; + *) + if test dlfiles = "$prev"; then + func_append dlfiles " $arg" + else + func_append dlprefiles " $arg" + fi + prev= + continue + ;; + esac + ;; + expsyms) + export_symbols=$arg + test -f "$arg" \ + || func_fatal_error "symbol file '$arg' does not exist" + prev= + continue + ;; + expsyms_regex) + export_symbols_regex=$arg + prev= + continue + ;; + framework) + case $host in + *-*-darwin*) + case "$deplibs " in + *" $qarg.ltframework "*) ;; + *) func_append deplibs " $qarg.ltframework" # this is fixed later + ;; + esac + ;; + esac + prev= + continue + ;; + inst_prefix) + inst_prefix_dir=$arg + prev= + continue + ;; + mllvm) + # Clang does not use LLVM to link, so we can simply discard any + # '-mllvm $arg' options when doing the link step. + prev= + continue + ;; + objectlist) + if test -f "$arg"; then + save_arg=$arg + moreargs= + for fil in `cat "$save_arg"` + do +# func_append moreargs " $fil" + arg=$fil + # A libtool-controlled object. + + # Check to see that this really is a libtool object. + if func_lalib_unsafe_p "$arg"; then + pic_object= + non_pic_object= + + # Read the .lo file + func_source "$arg" + + if test -z "$pic_object" || + test -z "$non_pic_object" || + test none = "$pic_object" && + test none = "$non_pic_object"; then + func_fatal_error "cannot find name of object for '$arg'" + fi + + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir=$func_dirname_result + + if test none != "$pic_object"; then + # Prepend the subdirectory the object is found in. + pic_object=$xdir$pic_object + + if test dlfiles = "$prev"; then + if test yes = "$build_libtool_libs" && test yes = "$dlopen_support"; then + func_append dlfiles " $pic_object" + prev= + continue + else + # If libtool objects are unsupported, then we need to preload. + prev=dlprefiles + fi + fi + + # CHECK ME: I think I busted this. -Ossama + if test dlprefiles = "$prev"; then + # Preload the old-style object. + func_append dlprefiles " $pic_object" + prev= + fi + + # A PIC object. + func_append libobjs " $pic_object" + arg=$pic_object + fi + + # Non-PIC object. + if test none != "$non_pic_object"; then + # Prepend the subdirectory the object is found in. + non_pic_object=$xdir$non_pic_object + + # A standard non-PIC object + func_append non_pic_objects " $non_pic_object" + if test -z "$pic_object" || test none = "$pic_object"; then + arg=$non_pic_object + fi + else + # If the PIC object exists, use it instead. + # $xdir was prepended to $pic_object above. + non_pic_object=$pic_object + func_append non_pic_objects " $non_pic_object" + fi + else + # Only an error if not doing a dry-run. + if $opt_dry_run; then + # Extract subdirectory from the argument. 
+ func_dirname "$arg" "/" "" + xdir=$func_dirname_result + + func_lo2o "$arg" + pic_object=$xdir$objdir/$func_lo2o_result + non_pic_object=$xdir$func_lo2o_result + func_append libobjs " $pic_object" + func_append non_pic_objects " $non_pic_object" + else + func_fatal_error "'$arg' is not a valid libtool object" + fi + fi + done + else + func_fatal_error "link input file '$arg' does not exist" + fi + arg=$save_arg + prev= + continue + ;; + os2dllname) + os2dllname=$arg + prev= + continue + ;; + precious_regex) + precious_files_regex=$arg + prev= + continue + ;; + release) + release=-$arg + prev= + continue + ;; + rpath | xrpath) + # We need an absolute path. + case $arg in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + func_fatal_error "only absolute run-paths are allowed" + ;; + esac + if test rpath = "$prev"; then + case "$rpath " in + *" $arg "*) ;; + *) func_append rpath " $arg" ;; + esac + else + case "$xrpath " in + *" $arg "*) ;; + *) func_append xrpath " $arg" ;; + esac + fi + prev= + continue + ;; + shrext) + shrext_cmds=$arg + prev= + continue + ;; + weak) + func_append weak_libs " $arg" + prev= + continue + ;; + xassembler) + func_append compiler_flags " -Xassembler $qarg" + prev= + func_append compile_command " -Xassembler $qarg" + func_append finalize_command " -Xassembler $qarg" + continue + ;; + xcclinker) + func_append linker_flags " $qarg" + func_append compiler_flags " $qarg" + prev= + func_append compile_command " $qarg" + func_append finalize_command " $qarg" + continue + ;; + xcompiler) + func_append compiler_flags " $qarg" + prev= + func_append compile_command " $qarg" + func_append finalize_command " $qarg" + continue + ;; + xlinker) + func_append linker_flags " $qarg" + func_append compiler_flags " $wl$qarg" + prev= + func_append compile_command " $wl$qarg" + func_append finalize_command " $wl$qarg" + continue + ;; + *) + eval "$prev=\"\$arg\"" + prev= + continue + ;; + esac + fi # test -n "$prev" + + prevarg=$arg + + case $arg in + -all-static) + if test -n "$link_static_flag"; then + # See comment for -static flag below, for more details. + func_append compile_command " $link_static_flag" + func_append finalize_command " $link_static_flag" + fi + continue + ;; + + -allow-undefined) + # FIXME: remove this flag sometime in the future. 
+ func_fatal_error "'-allow-undefined' must not be used because it is the default" + ;; + + -avoid-version) + avoid_version=yes + continue + ;; + + -bindir) + prev=bindir + continue + ;; + + -dlopen) + prev=dlfiles + continue + ;; + + -dlpreopen) + prev=dlprefiles + continue + ;; + + -export-dynamic) + export_dynamic=yes + continue + ;; + + -export-symbols | -export-symbols-regex) + if test -n "$export_symbols" || test -n "$export_symbols_regex"; then + func_fatal_error "more than one -exported-symbols argument is not allowed" + fi + if test X-export-symbols = "X$arg"; then + prev=expsyms + else + prev=expsyms_regex + fi + continue + ;; + + -framework) + prev=framework + continue + ;; + + -inst-prefix-dir) + prev=inst_prefix + continue + ;; + + # The native IRIX linker understands -LANG:*, -LIST:* and -LNO:* + # so, if we see these flags be careful not to treat them like -L + -L[A-Z][A-Z]*:*) + case $with_gcc/$host in + no/*-*-irix* | /*-*-irix*) + func_append compile_command " $arg" + func_append finalize_command " $arg" + ;; + esac + continue + ;; + + -L*) + func_stripname "-L" '' "$arg" + if test -z "$func_stripname_result"; then + if test "$#" -gt 0; then + func_fatal_error "require no space between '-L' and '$1'" + else + func_fatal_error "need path for '-L' option" + fi + fi + func_resolve_sysroot "$func_stripname_result" + dir=$func_resolve_sysroot_result + # We need an absolute path. + case $dir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + absdir=`cd "$dir" && pwd` + test -z "$absdir" && \ + func_fatal_error "cannot determine absolute directory name of '$dir'" + dir=$absdir + ;; + esac + case "$deplibs " in + *" -L$dir "* | *" $arg "*) + # Will only happen for absolute or sysroot arguments + ;; + *) + # Preserve sysroot, but never include relative directories + case $dir in + [\\/]* | [A-Za-z]:[\\/]* | =*) func_append deplibs " $arg" ;; + *) func_append deplibs " -L$dir" ;; + esac + func_append lib_search_path " $dir" + ;; + esac + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + testbindir=`$ECHO "$dir" | $SED 's*/lib$*/bin*'` + case :$dllsearchpath: in + *":$dir:"*) ;; + ::) dllsearchpath=$dir;; + *) func_append dllsearchpath ":$dir";; + esac + case :$dllsearchpath: in + *":$testbindir:"*) ;; + ::) dllsearchpath=$testbindir;; + *) func_append dllsearchpath ":$testbindir";; + esac + ;; + esac + continue + ;; + + -l*) + if test X-lc = "X$arg" || test X-lm = "X$arg"; then + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-beos* | *-cegcc* | *-*-haiku*) + # These systems don't actually have a C or math library (as such) + continue + ;; + *-*-os2*) + # These systems don't actually have a C library (as such) + test X-lc = "X$arg" && continue + ;; + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly* | *-*-bitrig* | *-*-midnightbsd*) + # Do not include libc due to us having libc/libc_r. + test X-lc = "X$arg" && continue + ;; + *-*-rhapsody* | *-*-darwin1.[012]) + # Rhapsody C and math libraries are in the System framework + func_append deplibs " System.ltframework" + continue + ;; + *-*-sco3.2v5* | *-*-sco5v6*) + # Causes problems with __ctype + test X-lc = "X$arg" && continue + ;; + *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) + # Compiler inserts libc in the correct place for threads to work + test X-lc = "X$arg" && continue + ;; + esac + elif test X-lc_r = "X$arg"; then + case $host in + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly* | *-*-bitrig* | *-*-midnightbsd*) + # Do not include libc_r directly, use -pthread flag. 
+ continue + ;; + esac + fi + func_append deplibs " $arg" + continue + ;; + + -mllvm) + prev=mllvm + continue + ;; + + -module) + module=yes + continue + ;; + + # Tru64 UNIX uses -model [arg] to determine the layout of C++ + # classes, name mangling, and exception handling. + # Darwin uses the -arch flag to determine output architecture. + -model|-arch|-isysroot|--sysroot) + func_append compiler_flags " $arg" + func_append compile_command " $arg" + func_append finalize_command " $arg" + prev=xcompiler + continue + ;; + # Solaris ld rejects as of 11.4. Refer to Oracle bug 22985199. + -pthread) + case $host in + *solaris2*) ;; + *) + case "$new_inherited_linker_flags " in + *" $arg "*) ;; + * ) func_append new_inherited_linker_flags " $arg" ;; + esac + ;; + esac + continue + ;; + -mt|-mthreads|-kthread|-Kthread|-pthreads|--thread-safe \ + |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*) + func_append compiler_flags " $arg" + func_append compile_command " $arg" + func_append finalize_command " $arg" + case "$new_inherited_linker_flags " in + *" $arg "*) ;; + * ) func_append new_inherited_linker_flags " $arg" ;; + esac + continue + ;; + + -multi_module) + single_module=$wl-multi_module + continue + ;; + + -no-fast-install) + fast_install=no + continue + ;; + + -no-install) + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-darwin* | *-cegcc*) + # The PATH hackery in wrapper scripts is required on Windows + # and Darwin in order for the loader to find any dlls it needs. + func_warning "'-no-install' is ignored for $host" + func_warning "assuming '-no-fast-install' instead" + fast_install=no + ;; + *) no_install=yes ;; + esac + continue + ;; + + -no-undefined) + allow_undefined=no + continue + ;; + + -objectlist) + prev=objectlist + continue + ;; + + -os2dllname) + prev=os2dllname + continue + ;; + + -o) prev=output ;; + + -precious-files-regex) + prev=precious_regex + continue + ;; + + -release) + prev=release + continue + ;; + + -rpath) + prev=rpath + continue + ;; + + -R) + prev=xrpath + continue + ;; + + -R*) + func_stripname '-R' '' "$arg" + dir=$func_stripname_result + # We need an absolute path. + case $dir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + =*) + func_stripname '=' '' "$dir" + dir=$lt_sysroot$func_stripname_result + ;; + *) + func_fatal_error "only absolute run-paths are allowed" + ;; + esac + case "$xrpath " in + *" $dir "*) ;; + *) func_append xrpath " $dir" ;; + esac + continue + ;; + + -shared) + # The effects of -shared are defined in a previous loop. + continue + ;; + + -shrext) + prev=shrext + continue + ;; + + -static | -static-libtool-libs) + # The effects of -static are defined in a previous loop. + # We used to do the same as -all-static on platforms that + # didn't have a PIC flag, but the assumption that the effects + # would be equivalent was wrong. It would break on at least + # Digital Unix and AIX. 
+ continue + ;; + + -thread-safe) + thread_safe=yes + continue + ;; + + -version-info) + prev=vinfo + continue + ;; + + -version-number) + prev=vinfo + vinfo_number=yes + continue + ;; + + -weak) + prev=weak + continue + ;; + + -Wc,*) + func_stripname '-Wc,' '' "$arg" + args=$func_stripname_result + arg= + save_ifs=$IFS; IFS=, + for flag in $args; do + IFS=$save_ifs + func_quote_arg pretty "$flag" + func_append arg " $func_quote_arg_result" + func_append compiler_flags " $func_quote_arg_result" + done + IFS=$save_ifs + func_stripname ' ' '' "$arg" + arg=$func_stripname_result + ;; + + -Wl,*) + func_stripname '-Wl,' '' "$arg" + args=$func_stripname_result + arg= + save_ifs=$IFS; IFS=, + for flag in $args; do + IFS=$save_ifs + func_quote_arg pretty "$flag" + func_append arg " $wl$func_quote_arg_result" + func_append compiler_flags " $wl$func_quote_arg_result" + func_append linker_flags " $func_quote_arg_result" + done + IFS=$save_ifs + func_stripname ' ' '' "$arg" + arg=$func_stripname_result + ;; + + -Xassembler) + prev=xassembler + continue + ;; + + -Xcompiler) + prev=xcompiler + continue + ;; + + -Xlinker) + prev=xlinker + continue + ;; + + -XCClinker) + prev=xcclinker + continue + ;; + + # -msg_* for osf cc + -msg_*) + func_quote_arg pretty "$arg" + arg=$func_quote_arg_result + ;; + + # Flags to be passed through unchanged, with rationale: + # -64, -mips[0-9] enable 64-bit mode for the SGI compiler + # -r[0-9][0-9]* specify processor for the SGI compiler + # -xarch=*, -xtarget=* enable 64-bit mode for the Sun compiler + # +DA*, +DD* enable 64-bit mode for the HP compiler + # -q* compiler args for the IBM compiler + # -m*, -t[45]*, -txscale* architecture-specific flags for GCC + # -F/path path to uninstalled frameworks, gcc on darwin + # -p, -pg, --coverage, -fprofile-* profiling flags for GCC + # -fstack-protector* stack protector flags for GCC + # @file GCC response files + # -tp=* Portland pgcc target processor selection + # --sysroot=* for sysroot support + # -O*, -g*, -flto*, -fwhopr*, -fuse-linker-plugin GCC link-time optimization + # -specs=* GCC specs files + # -stdlib=* select c++ std lib with clang + # -fsanitize=* Clang/GCC memory and address sanitizer + # -fuse-ld=* Linker select flags for GCC + # -Wa,* Pass flags directly to the assembler + -64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*| \ + -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*|-tp=*|--sysroot=*| \ + -O*|-g*|-flto*|-fwhopr*|-fuse-linker-plugin|-fstack-protector*|-stdlib=*| \ + -specs=*|-fsanitize=*|-fuse-ld=*|-Wa,*) + func_quote_arg pretty "$arg" + arg=$func_quote_arg_result + func_append compile_command " $arg" + func_append finalize_command " $arg" + func_append compiler_flags " $arg" + continue + ;; + + -Z*) + if test os2 = "`expr $host : '.*\(os2\)'`"; then + # OS/2 uses -Zxxx to specify OS/2-specific options + compiler_flags="$compiler_flags $arg" + func_append compile_command " $arg" + func_append finalize_command " $arg" + case $arg in + -Zlinker | -Zstack) + prev=xcompiler + ;; + esac + continue + else + # Otherwise treat like 'Some other compiler flag' below + func_quote_arg pretty "$arg" + arg=$func_quote_arg_result + fi + ;; + + # Some other compiler flag. + -* | +*) + func_quote_arg pretty "$arg" + arg=$func_quote_arg_result + ;; + + *.$objext) + # A standard object. + func_append objs " $arg" + ;; + + *.lo) + # A libtool-controlled object. + + # Check to see that this really is a libtool object. 
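+	  # A .lo file is itself a tiny shell fragment that func_source reads
+	  # below; for a hypothetical object foo.lo it would look roughly like:
+	  #   pic_object='.libs/foo.o'
+	  #   non_pic_object='foo.o'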
+ if func_lalib_unsafe_p "$arg"; then + pic_object= + non_pic_object= + + # Read the .lo file + func_source "$arg" + + if test -z "$pic_object" || + test -z "$non_pic_object" || + test none = "$pic_object" && + test none = "$non_pic_object"; then + func_fatal_error "cannot find name of object for '$arg'" + fi + + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir=$func_dirname_result + + test none = "$pic_object" || { + # Prepend the subdirectory the object is found in. + pic_object=$xdir$pic_object + + if test dlfiles = "$prev"; then + if test yes = "$build_libtool_libs" && test yes = "$dlopen_support"; then + func_append dlfiles " $pic_object" + prev= + continue + else + # If libtool objects are unsupported, then we need to preload. + prev=dlprefiles + fi + fi + + # CHECK ME: I think I busted this. -Ossama + if test dlprefiles = "$prev"; then + # Preload the old-style object. + func_append dlprefiles " $pic_object" + prev= + fi + + # A PIC object. + func_append libobjs " $pic_object" + arg=$pic_object + } + + # Non-PIC object. + if test none != "$non_pic_object"; then + # Prepend the subdirectory the object is found in. + non_pic_object=$xdir$non_pic_object + + # A standard non-PIC object + func_append non_pic_objects " $non_pic_object" + if test -z "$pic_object" || test none = "$pic_object"; then + arg=$non_pic_object + fi + else + # If the PIC object exists, use it instead. + # $xdir was prepended to $pic_object above. + non_pic_object=$pic_object + func_append non_pic_objects " $non_pic_object" + fi + else + # Only an error if not doing a dry-run. + if $opt_dry_run; then + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir=$func_dirname_result + + func_lo2o "$arg" + pic_object=$xdir$objdir/$func_lo2o_result + non_pic_object=$xdir$func_lo2o_result + func_append libobjs " $pic_object" + func_append non_pic_objects " $non_pic_object" + else + func_fatal_error "'$arg' is not a valid libtool object" + fi + fi + ;; + + *.$libext) + # An archive. + func_append deplibs " $arg" + func_append old_deplibs " $arg" + continue + ;; + + *.la) + # A libtool-controlled library. + + func_resolve_sysroot "$arg" + if test dlfiles = "$prev"; then + # This library was specified with -dlopen. + func_append dlfiles " $func_resolve_sysroot_result" + prev= + elif test dlprefiles = "$prev"; then + # The library was specified with -dlpreopen. + func_append dlprefiles " $func_resolve_sysroot_result" + prev= + else + func_append deplibs " $func_resolve_sysroot_result" + fi + continue + ;; + + # Some other compiler argument. + *) + # Unknown arguments in both finalize_command and compile_command need + # to be aesthetically quoted because they are evaled later. + func_quote_arg pretty "$arg" + arg=$func_quote_arg_result + ;; + esac # arg + + # Now actually substitute the argument into the commands. 
+ if test -n "$arg"; then + func_append compile_command " $arg" + func_append finalize_command " $arg" + fi + done # argument parsing loop + + test -n "$prev" && \ + func_fatal_help "the '$prevarg' option requires an argument" + + if test yes = "$export_dynamic" && test -n "$export_dynamic_flag_spec"; then + eval arg=\"$export_dynamic_flag_spec\" + func_append compile_command " $arg" + func_append finalize_command " $arg" + fi + + oldlibs= + # calculate the name of the file, without its directory + func_basename "$output" + outputname=$func_basename_result + libobjs_save=$libobjs + + if test -n "$shlibpath_var"; then + # get the directories listed in $shlibpath_var + eval shlib_search_path=\`\$ECHO \"\$$shlibpath_var\" \| \$SED \'s/:/ /g\'\` + else + shlib_search_path= + fi + eval sys_lib_search_path=\"$sys_lib_search_path_spec\" + eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\" + + # Definition is injected by LT_CONFIG during libtool generation. + func_munge_path_list sys_lib_dlsearch_path "$LT_SYS_LIBRARY_PATH" + + func_dirname "$output" "/" "" + output_objdir=$func_dirname_result$objdir + func_to_tool_file "$output_objdir/" + tool_output_objdir=$func_to_tool_file_result + # Create the object directory. + func_mkdir_p "$output_objdir" + + # Determine the type of output + case $output in + "") + func_fatal_help "you must specify an output file" + ;; + *.$libext) linkmode=oldlib ;; + *.lo | *.$objext) linkmode=obj ;; + *.la) linkmode=lib ;; + *) linkmode=prog ;; # Anything else should be a program. + esac + + specialdeplibs= + + libs= + # Find all interdependent deplibs by searching for libraries + # that are linked more than once (e.g. -la -lb -la) + for deplib in $deplibs; do + if $opt_preserve_dup_deps; then + case "$libs " in + *" $deplib "*) func_append specialdeplibs " $deplib" ;; + esac + fi + func_append libs " $deplib" + done + + if test lib = "$linkmode"; then + libs="$predeps $libs $compiler_lib_search_path $postdeps" + + # Compute libraries that are listed more than once in $predeps + # $postdeps and mark them as special (i.e., whose duplicates are + # not to be eliminated). + pre_post_deps= + if $opt_duplicate_compiler_generated_deps; then + for pre_post_dep in $predeps $postdeps; do + case "$pre_post_deps " in + *" $pre_post_dep "*) func_append specialdeplibs " $pre_post_deps" ;; + esac + func_append pre_post_deps " $pre_post_dep" + done + fi + pre_post_deps= + fi + + deplibs= + newdependency_libs= + newlib_search_path= + need_relink=no # whether we're linking any uninstalled libtool libraries + notinst_deplibs= # not-installed libtool libraries + notinst_path= # paths that contain not-installed libtool libraries + + case $linkmode in + lib) + passes="conv dlpreopen link" + for file in $dlfiles $dlprefiles; do + case $file in + *.la) ;; + *) + func_fatal_help "libraries can '-dlopen' only libtool libraries: $file" + ;; + esac + done + ;; + prog) + compile_deplibs= + finalize_deplibs= + alldeplibs=false + newdlfiles= + newdlprefiles= + passes="conv scan dlopen dlpreopen link" + ;; + *) passes="conv" + ;; + esac + + for pass in $passes; do + # The preopen pass in lib mode reverses $deplibs; put it back here + # so that -L comes before libs that need it for instance... 
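+      # For instance (hypothetical values): if the preopen pass left deplibs
+      # as "libfoo.la -L/opt/lib libbar.la", the loop below re-emits them in
+      # reverse as "libbar.la -L/opt/lib libfoo.la", restoring -L ordering.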
+ if test lib,link = "$linkmode,$pass"; then + ## FIXME: Find the place where the list is rebuilt in the wrong + ## order, and fix it there properly + tmp_deplibs= + for deplib in $deplibs; do + tmp_deplibs="$deplib $tmp_deplibs" + done + deplibs=$tmp_deplibs + fi + + if test lib,link = "$linkmode,$pass" || + test prog,scan = "$linkmode,$pass"; then + libs=$deplibs + deplibs= + fi + if test prog = "$linkmode"; then + case $pass in + dlopen) libs=$dlfiles ;; + dlpreopen) libs=$dlprefiles ;; + link) libs="$deplibs %DEPLIBS% $dependency_libs" ;; + esac + fi + if test lib,dlpreopen = "$linkmode,$pass"; then + # Collect and forward deplibs of preopened libtool libs + for lib in $dlprefiles; do + # Ignore non-libtool-libs + dependency_libs= + func_resolve_sysroot "$lib" + case $lib in + *.la) func_source "$func_resolve_sysroot_result" ;; + esac + + # Collect preopened libtool deplibs, except any this library + # has declared as weak libs + for deplib in $dependency_libs; do + func_basename "$deplib" + deplib_base=$func_basename_result + case " $weak_libs " in + *" $deplib_base "*) ;; + *) func_append deplibs " $deplib" ;; + esac + done + done + libs=$dlprefiles + fi + if test dlopen = "$pass"; then + # Collect dlpreopened libraries + save_deplibs=$deplibs + deplibs= + fi + + for deplib in $libs; do + lib= + found=false + case $deplib in + -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \ + |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*) + if test prog,link = "$linkmode,$pass"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + func_append compiler_flags " $deplib" + if test lib = "$linkmode"; then + case "$new_inherited_linker_flags " in + *" $deplib "*) ;; + * ) func_append new_inherited_linker_flags " $deplib" ;; + esac + fi + fi + continue + ;; + -l*) + if test lib != "$linkmode" && test prog != "$linkmode"; then + func_warning "'-l' is ignored for archives/objects" + continue + fi + func_stripname '-l' '' "$deplib" + name=$func_stripname_result + if test lib = "$linkmode"; then + searchdirs="$newlib_search_path $lib_search_path $compiler_lib_search_dirs $sys_lib_search_path $shlib_search_path" + else + searchdirs="$newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path" + fi + for searchdir in $searchdirs; do + for search_ext in .la $std_shrext .so .a; do + # Search the libtool library + lib=$searchdir/lib$name$search_ext + if test -f "$lib"; then + if test .la = "$search_ext"; then + found=: + else + found=false + fi + break 2 + fi + done + done + if $found; then + # deplib is a libtool library + # If $allow_libtool_libs_with_static_runtimes && $deplib is a stdlib, + # We need to do some special things here, and not later. + if test yes = "$allow_libtool_libs_with_static_runtimes"; then + case " $predeps $postdeps " in + *" $deplib "*) + if func_lalib_p "$lib"; then + library_names= + old_library= + func_source "$lib" + for l in $old_library $library_names; do + ll=$l + done + if test "X$ll" = "X$old_library"; then # only static version available + found=false + func_dirname "$lib" "" "." 
+ ladir=$func_dirname_result + lib=$ladir/$old_library + if test prog,link = "$linkmode,$pass"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + test lib = "$linkmode" && newdependency_libs="$deplib $newdependency_libs" + fi + continue + fi + fi + ;; + *) ;; + esac + fi + else + # deplib doesn't seem to be a libtool library + if test prog,link = "$linkmode,$pass"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + test lib = "$linkmode" && newdependency_libs="$deplib $newdependency_libs" + fi + continue + fi + ;; # -l + *.ltframework) + if test prog,link = "$linkmode,$pass"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + if test lib = "$linkmode"; then + case "$new_inherited_linker_flags " in + *" $deplib "*) ;; + * ) func_append new_inherited_linker_flags " $deplib" ;; + esac + fi + fi + continue + ;; + -L*) + case $linkmode in + lib) + deplibs="$deplib $deplibs" + test conv = "$pass" && continue + newdependency_libs="$deplib $newdependency_libs" + func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + func_append newlib_search_path " $func_resolve_sysroot_result" + ;; + prog) + if test conv = "$pass"; then + deplibs="$deplib $deplibs" + continue + fi + if test scan = "$pass"; then + deplibs="$deplib $deplibs" + else + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + fi + func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + func_append newlib_search_path " $func_resolve_sysroot_result" + ;; + *) + func_warning "'-L' is ignored for archives/objects" + ;; + esac # linkmode + continue + ;; # -L + -R*) + if test link = "$pass"; then + func_stripname '-R' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + dir=$func_resolve_sysroot_result + # Make sure the xrpath contains only unique directories. + case "$xrpath " in + *" $dir "*) ;; + *) func_append xrpath " $dir" ;; + esac + fi + deplibs="$deplib $deplibs" + continue + ;; + *.la) + func_resolve_sysroot "$deplib" + lib=$func_resolve_sysroot_result + ;; + *.$libext) + if test conv = "$pass"; then + deplibs="$deplib $deplibs" + continue + fi + case $linkmode in + lib) + # Linking convenience modules into shared libraries is allowed, + # but linking other static libraries is non-portable. + case " $dlpreconveniencelibs " in + *" $deplib "*) ;; + *) + valid_a_lib=false + case $deplibs_check_method in + match_pattern*) + set dummy $deplibs_check_method; shift + match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` + if eval "\$ECHO \"$deplib\"" 2>/dev/null | $SED 10q \ + | $EGREP "$match_pattern_regex" > /dev/null; then + valid_a_lib=: + fi + ;; + pass_all) + valid_a_lib=: + ;; + esac + if $valid_a_lib; then + echo + $ECHO "*** Warning: Linking the shared library $output against the" + $ECHO "*** static library $deplib is not portable!" + deplibs="$deplib $deplibs" + else + echo + $ECHO "*** Warning: Trying to link with static lib archive $deplib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. 
But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have" + echo "*** because the file extensions .$libext of this argument makes me believe" + echo "*** that it is just a static archive that I should not use here." + fi + ;; + esac + continue + ;; + prog) + if test link != "$pass"; then + deplibs="$deplib $deplibs" + else + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + fi + continue + ;; + esac # linkmode + ;; # *.$libext + *.lo | *.$objext) + if test conv = "$pass"; then + deplibs="$deplib $deplibs" + elif test prog = "$linkmode"; then + if test dlpreopen = "$pass" || test yes != "$dlopen_support" || test no = "$build_libtool_libs"; then + # If there is no dlopen support or we're linking statically, + # we need to preload. + func_append newdlprefiles " $deplib" + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + func_append newdlfiles " $deplib" + fi + fi + continue + ;; + %DEPLIBS%) + alldeplibs=: + continue + ;; + esac # case $deplib + + $found || test -f "$lib" \ + || func_fatal_error "cannot find the library '$lib' or unhandled argument '$deplib'" + + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$lib" \ + || func_fatal_error "'$lib' is not a valid libtool archive" + + func_dirname "$lib" "" "." + ladir=$func_dirname_result + + dlname= + dlopen= + dlpreopen= + libdir= + library_names= + old_library= + inherited_linker_flags= + # If the library was installed with an old release of libtool, + # it will not redefine variables installed, or shouldnotlink + installed=yes + shouldnotlink=no + avoidtemprpath= + + + # Read the .la file + func_source "$lib" + + # Convert "-framework foo" to "foo.ltframework" + if test -n "$inherited_linker_flags"; then + tmp_inherited_linker_flags=`$ECHO "$inherited_linker_flags" | $SED 's/-framework \([^ $]*\)/\1.ltframework/g'` + for tmp_inherited_linker_flag in $tmp_inherited_linker_flags; do + case " $new_inherited_linker_flags " in + *" $tmp_inherited_linker_flag "*) ;; + *) func_append new_inherited_linker_flags " $tmp_inherited_linker_flag";; + esac + done + fi + dependency_libs=`$ECHO " $dependency_libs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + if test lib,link = "$linkmode,$pass" || + test prog,scan = "$linkmode,$pass" || + { test prog != "$linkmode" && test lib != "$linkmode"; }; then + test -n "$dlopen" && func_append dlfiles " $dlopen" + test -n "$dlpreopen" && func_append dlprefiles " $dlpreopen" + fi + + if test conv = "$pass"; then + # Only check for convenience libraries + deplibs="$lib $deplibs" + if test -z "$libdir"; then + if test -z "$old_library"; then + func_fatal_error "cannot find name of link library for '$lib'" + fi + # It is a libtool convenience library, so add in its objects. + func_append convenience " $ladir/$objdir/$old_library" + func_append old_convenience " $ladir/$objdir/$old_library" + elif test prog != "$linkmode" && test lib != "$linkmode"; then + func_fatal_error "'$lib' is not a convenience library" + fi + tmp_libs= + for deplib in $dependency_libs; do + deplibs="$deplib $deplibs" + if $opt_preserve_dup_deps; then + case "$tmp_libs " in + *" $deplib "*) func_append specialdeplibs " $deplib" ;; + esac + fi + func_append tmp_libs " $deplib" + done + continue + fi # $pass = conv + + + # Get the name of the library we link against. 
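+	# $old_library and $library_names were set when the .la file was
+	# sourced above; as a rough, hypothetical example, a libfoo.la might
+	# contain:
+	#   library_names='libfoo.so.1.0.0 libfoo.so.1 libfoo.so'
+	#   old_library='libfoo.a'
+	# so linklib normally ends up as either the static archive or the
+	# last (unversioned) shared library name.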
+ linklib= + if test -n "$old_library" && + { test yes = "$prefer_static_libs" || + test built,no = "$prefer_static_libs,$installed"; }; then + linklib=$old_library + else + for l in $old_library $library_names; do + linklib=$l + done + fi + if test -z "$linklib"; then + func_fatal_error "cannot find name of link library for '$lib'" + fi + + # This library was specified with -dlopen. + if test dlopen = "$pass"; then + test -z "$libdir" \ + && func_fatal_error "cannot -dlopen a convenience library: '$lib'" + if test -z "$dlname" || + test yes != "$dlopen_support" || + test no = "$build_libtool_libs" + then + # If there is no dlname, no dlopen support or we're linking + # statically, we need to preload. We also need to preload any + # dependent libraries so libltdl's deplib preloader doesn't + # bomb out in the load deplibs phase. + func_append dlprefiles " $lib $dependency_libs" + else + func_append newdlfiles " $lib" + fi + continue + fi # $pass = dlopen + + # We need an absolute path. + case $ladir in + [\\/]* | [A-Za-z]:[\\/]*) abs_ladir=$ladir ;; + *) + abs_ladir=`cd "$ladir" && pwd` + if test -z "$abs_ladir"; then + func_warning "cannot determine absolute directory name of '$ladir'" + func_warning "passing it literally to the linker, although it might fail" + abs_ladir=$ladir + fi + ;; + esac + func_basename "$lib" + laname=$func_basename_result + + # Find the relevant object directory and library name. + if test yes = "$installed"; then + if test ! -f "$lt_sysroot$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then + func_warning "library '$lib' was moved." + dir=$ladir + absdir=$abs_ladir + libdir=$abs_ladir + else + dir=$lt_sysroot$libdir + absdir=$lt_sysroot$libdir + fi + test yes = "$hardcode_automatic" && avoidtemprpath=yes + else + if test ! -f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib"; then + dir=$ladir + absdir=$abs_ladir + # Remove this search path later + func_append notinst_path " $abs_ladir" + else + dir=$ladir/$objdir + absdir=$abs_ladir/$objdir + # Remove this search path later + func_append notinst_path " $abs_ladir" + fi + fi # $installed = yes + func_stripname 'lib' '.la' "$laname" + name=$func_stripname_result + + # This library was specified with -dlpreopen. + if test dlpreopen = "$pass"; then + if test -z "$libdir" && test prog = "$linkmode"; then + func_fatal_error "only libraries may -dlpreopen a convenience library: '$lib'" + fi + case $host in + # special handling for platforms with PE-DLLs. + *cygwin* | *mingw* | *cegcc* ) + # Linker will automatically link against shared library if both + # static and shared are present. Therefore, ensure we extract + # symbols from the import library if a shared library is present + # (otherwise, the dlopen module name will be incorrect). We do + # this by putting the import library name into $newdlprefiles. + # We recover the dlopen module name by 'saving' the la file + # name in a special purpose variable, and (later) extracting the + # dlname from the la file. + if test -n "$dlname"; then + func_tr_sh "$dir/$linklib" + eval "libfile_$func_tr_sh_result=\$abs_ladir/\$laname" + func_append newdlprefiles " $dir/$linklib" + else + func_append newdlprefiles " $dir/$old_library" + # Keep a list of preopened convenience libraries to check + # that they are being used correctly in the link pass. + test -z "$libdir" && \ + func_append dlpreconveniencelibs " $dir/$old_library" + fi + ;; + * ) + # Prefer using a static library (so that no silly _DYNAMIC symbols + # are required to link). 
+ if test -n "$old_library"; then + func_append newdlprefiles " $dir/$old_library" + # Keep a list of preopened convenience libraries to check + # that they are being used correctly in the link pass. + test -z "$libdir" && \ + func_append dlpreconveniencelibs " $dir/$old_library" + # Otherwise, use the dlname, so that lt_dlopen finds it. + elif test -n "$dlname"; then + func_append newdlprefiles " $dir/$dlname" + else + func_append newdlprefiles " $dir/$linklib" + fi + ;; + esac + fi # $pass = dlpreopen + + if test -z "$libdir"; then + # Link the convenience library + if test lib = "$linkmode"; then + deplibs="$dir/$old_library $deplibs" + elif test prog,link = "$linkmode,$pass"; then + compile_deplibs="$dir/$old_library $compile_deplibs" + finalize_deplibs="$dir/$old_library $finalize_deplibs" + else + deplibs="$lib $deplibs" # used for prog,scan pass + fi + continue + fi + + + if test prog = "$linkmode" && test link != "$pass"; then + func_append newlib_search_path " $ladir" + deplibs="$lib $deplibs" + + linkalldeplibs=false + if test no != "$link_all_deplibs" || test -z "$library_names" || + test no = "$build_libtool_libs"; then + linkalldeplibs=: + fi + + tmp_libs= + for deplib in $dependency_libs; do + case $deplib in + -L*) func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + func_append newlib_search_path " $func_resolve_sysroot_result" + ;; + esac + # Need to link against all dependency_libs? + if $linkalldeplibs; then + deplibs="$deplib $deplibs" + else + # Need to hardcode shared library paths + # or/and link against static libraries + newdependency_libs="$deplib $newdependency_libs" + fi + if $opt_preserve_dup_deps; then + case "$tmp_libs " in + *" $deplib "*) func_append specialdeplibs " $deplib" ;; + esac + fi + func_append tmp_libs " $deplib" + done # for deplib + continue + fi # $linkmode = prog... + + if test prog,link = "$linkmode,$pass"; then + if test -n "$library_names" && + { { test no = "$prefer_static_libs" || + test built,yes = "$prefer_static_libs,$installed"; } || + test -z "$old_library"; }; then + # We need to hardcode the library path + if test -n "$shlibpath_var" && test -z "$avoidtemprpath"; then + # Make sure the rpath contains only unique directories. + case $temp_rpath: in + *"$absdir:"*) ;; + *) func_append temp_rpath "$absdir:" ;; + esac + fi + + # Hardcode the library path. + # Skip directories that are in the system default run-time + # search path. + case " $sys_lib_dlsearch_path " in + *" $absdir "*) ;; + *) + case "$compile_rpath " in + *" $absdir "*) ;; + *) func_append compile_rpath " $absdir" ;; + esac + ;; + esac + case " $sys_lib_dlsearch_path " in + *" $libdir "*) ;; + *) + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + ;; + esac + fi # $linkmode,$pass = prog,link... 
+ + if $alldeplibs && + { test pass_all = "$deplibs_check_method" || + { test yes = "$build_libtool_libs" && + test -n "$library_names"; }; }; then + # We only need to search for static libraries + continue + fi + fi + + link_static=no # Whether the deplib will be linked statically + use_static_libs=$prefer_static_libs + if test built = "$use_static_libs" && test yes = "$installed"; then + use_static_libs=no + fi + if test -n "$library_names" && + { test no = "$use_static_libs" || test -z "$old_library"; }; then + case $host in + *cygwin* | *mingw* | *cegcc* | *os2*) + # No point in relinking DLLs because paths are not encoded + func_append notinst_deplibs " $lib" + need_relink=no + ;; + *) + if test no = "$installed"; then + func_append notinst_deplibs " $lib" + need_relink=yes + fi + ;; + esac + # This is a shared library + + # Warn about portability, can't link against -module's on some + # systems (darwin). Don't bleat about dlopened modules though! + dlopenmodule= + for dlpremoduletest in $dlprefiles; do + if test "X$dlpremoduletest" = "X$lib"; then + dlopenmodule=$dlpremoduletest + break + fi + done + if test -z "$dlopenmodule" && test yes = "$shouldnotlink" && test link = "$pass"; then + echo + if test prog = "$linkmode"; then + $ECHO "*** Warning: Linking the executable $output against the loadable module" + else + $ECHO "*** Warning: Linking the shared library $output against the loadable module" + fi + $ECHO "*** $linklib is not portable!" + fi + if test lib = "$linkmode" && + test yes = "$hardcode_into_libs"; then + # Hardcode the library path. + # Skip directories that are in the system default run-time + # search path. + case " $sys_lib_dlsearch_path " in + *" $absdir "*) ;; + *) + case "$compile_rpath " in + *" $absdir "*) ;; + *) func_append compile_rpath " $absdir" ;; + esac + ;; + esac + case " $sys_lib_dlsearch_path " in + *" $libdir "*) ;; + *) + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + ;; + esac + fi + + if test -n "$old_archive_from_expsyms_cmds"; then + # figure out the soname + set dummy $library_names + shift + realname=$1 + shift + libname=`eval "\\$ECHO \"$libname_spec\""` + # use dlname if we got it. it's perfectly good, no? + if test -n "$dlname"; then + soname=$dlname + elif test -n "$soname_spec"; then + # bleh windows + case $host in + *cygwin* | mingw* | *cegcc* | *os2*) + func_arith $current - $age + major=$func_arith_result + versuffix=-$major + ;; + esac + eval soname=\"$soname_spec\" + else + soname=$realname + fi + + # Make a new name for the extract_expsyms_cmds to use + soroot=$soname + func_basename "$soroot" + soname=$func_basename_result + func_stripname 'lib' '.dll' "$soname" + newlib=libimp-$func_stripname_result.a + + # If the library has no export list, then create one now + if test -f "$output_objdir/$soname-def"; then : + else + func_verbose "extracting exported symbol list from '$soname'" + func_execute_cmds "$extract_expsyms_cmds" 'exit $?' + fi + + # Create $newlib + if test -f "$output_objdir/$newlib"; then :; else + func_verbose "generating import library for '$soname'" + func_execute_cmds "$old_archive_from_expsyms_cmds" 'exit $?' 
+ fi + # make sure the library variables are pointing to the new library + dir=$output_objdir + linklib=$newlib + fi # test -n "$old_archive_from_expsyms_cmds" + + if test prog = "$linkmode" || test relink != "$opt_mode"; then + add_shlibpath= + add_dir= + add= + lib_linked=yes + case $hardcode_action in + immediate | unsupported) + if test no = "$hardcode_direct"; then + add=$dir/$linklib + case $host in + *-*-sco3.2v5.0.[024]*) add_dir=-L$dir ;; + *-*-sysv4*uw2*) add_dir=-L$dir ;; + *-*-sysv5OpenUNIX* | *-*-sysv5UnixWare7.[01].[10]* | \ + *-*-unixware7*) add_dir=-L$dir ;; + *-*-darwin* ) + # if the lib is a (non-dlopened) module then we cannot + # link against it, someone is ignoring the earlier warnings + if /usr/bin/file -L $add 2> /dev/null | + $GREP ": [^:]* bundle" >/dev/null; then + if test "X$dlopenmodule" != "X$lib"; then + $ECHO "*** Warning: lib $linklib is a module, not a shared library" + if test -z "$old_library"; then + echo + echo "*** And there doesn't seem to be a static archive available" + echo "*** The link will probably fail, sorry" + else + add=$dir/$old_library + fi + elif test -n "$old_library"; then + add=$dir/$old_library + fi + fi + esac + elif test no = "$hardcode_minus_L"; then + case $host in + *-*-sunos*) add_shlibpath=$dir ;; + esac + add_dir=-L$dir + add=-l$name + elif test no = "$hardcode_shlibpath_var"; then + add_shlibpath=$dir + add=-l$name + else + lib_linked=no + fi + ;; + relink) + if test yes = "$hardcode_direct" && + test no = "$hardcode_direct_absolute"; then + add=$dir/$linklib + elif test yes = "$hardcode_minus_L"; then + add_dir=-L$absdir + # Try looking first in the location we're being installed to. + if test -n "$inst_prefix_dir"; then + case $libdir in + [\\/]*) + func_append add_dir " -L$inst_prefix_dir$libdir" + ;; + esac + fi + add=-l$name + elif test yes = "$hardcode_shlibpath_var"; then + add_shlibpath=$dir + add=-l$name + else + lib_linked=no + fi + ;; + *) lib_linked=no ;; + esac + + if test yes != "$lib_linked"; then + func_fatal_configuration "unsupported hardcode properties" + fi + + if test -n "$add_shlibpath"; then + case :$compile_shlibpath: in + *":$add_shlibpath:"*) ;; + *) func_append compile_shlibpath "$add_shlibpath:" ;; + esac + fi + if test prog = "$linkmode"; then + test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs" + test -n "$add" && compile_deplibs="$add $compile_deplibs" + else + test -n "$add_dir" && deplibs="$add_dir $deplibs" + test -n "$add" && deplibs="$add $deplibs" + if test yes != "$hardcode_direct" && + test yes != "$hardcode_minus_L" && + test yes = "$hardcode_shlibpath_var"; then + case :$finalize_shlibpath: in + *":$libdir:"*) ;; + *) func_append finalize_shlibpath "$libdir:" ;; + esac + fi + fi + fi + + if test prog = "$linkmode" || test relink = "$opt_mode"; then + add_shlibpath= + add_dir= + add= + # Finalize command for both is simple: just hardcode it. + if test yes = "$hardcode_direct" && + test no = "$hardcode_direct_absolute"; then + add=$libdir/$linklib + elif test yes = "$hardcode_minus_L"; then + add_dir=-L$libdir + add=-l$name + elif test yes = "$hardcode_shlibpath_var"; then + case :$finalize_shlibpath: in + *":$libdir:"*) ;; + *) func_append finalize_shlibpath "$libdir:" ;; + esac + add=-l$name + elif test yes = "$hardcode_automatic"; then + if test -n "$inst_prefix_dir" && + test -f "$inst_prefix_dir$libdir/$linklib"; then + add=$inst_prefix_dir$libdir/$linklib + else + add=$libdir/$linklib + fi + else + # We cannot seem to hardcode it, guess we'll fake it. 
+ add_dir=-L$libdir + # Try looking first in the location we're being installed to. + if test -n "$inst_prefix_dir"; then + case $libdir in + [\\/]*) + func_append add_dir " -L$inst_prefix_dir$libdir" + ;; + esac + fi + add=-l$name + fi + + if test prog = "$linkmode"; then + test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs" + test -n "$add" && finalize_deplibs="$add $finalize_deplibs" + else + test -n "$add_dir" && deplibs="$add_dir $deplibs" + test -n "$add" && deplibs="$add $deplibs" + fi + fi + elif test prog = "$linkmode"; then + # Here we assume that one of hardcode_direct or hardcode_minus_L + # is not unsupported. This is valid on all known static and + # shared platforms. + if test unsupported != "$hardcode_direct"; then + test -n "$old_library" && linklib=$old_library + compile_deplibs="$dir/$linklib $compile_deplibs" + finalize_deplibs="$dir/$linklib $finalize_deplibs" + else + compile_deplibs="-l$name -L$dir $compile_deplibs" + finalize_deplibs="-l$name -L$dir $finalize_deplibs" + fi + elif test yes = "$build_libtool_libs"; then + # Not a shared library + if test pass_all != "$deplibs_check_method"; then + # We're trying link a shared library against a static one + # but the system doesn't support it. + + # Just print a warning and add the library to dependency_libs so + # that the program can be linked against the static library. + echo + $ECHO "*** Warning: This system cannot link to static lib archive $lib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have." + if test yes = "$module"; then + echo "*** But as you try to build a module library, libtool will still create " + echo "*** a static module, that should work as long as the dlopening application" + echo "*** is linked with the -dlopen flag to resolve symbols at runtime." + if test -z "$global_symbol_pipe"; then + echo + echo "*** However, this would only work if libtool was able to extract symbol" + echo "*** lists from a program, using 'nm' or equivalent, but libtool could" + echo "*** not find such a program. So, this module is probably useless." + echo "*** 'nm' from GNU binutils and a full rebuild may help." + fi + if test no = "$build_old_libs"; then + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + fi + else + deplibs="$dir/$old_library $deplibs" + link_static=yes + fi + fi # link shared/static library? + + if test lib = "$linkmode"; then + if test -n "$dependency_libs" && + { test yes != "$hardcode_into_libs" || + test yes = "$build_old_libs" || + test yes = "$link_static"; }; then + # Extract -R from dependency_libs + temp_deplibs= + for libdir in $dependency_libs; do + case $libdir in + -R*) func_stripname '-R' '' "$libdir" + temp_xrpath=$func_stripname_result + case " $xrpath " in + *" $temp_xrpath "*) ;; + *) func_append xrpath " $temp_xrpath";; + esac;; + *) func_append temp_deplibs " $libdir";; + esac + done + dependency_libs=$temp_deplibs + fi + + func_append newlib_search_path " $absdir" + # Link against this library + test no = "$link_static" && newdependency_libs="$abs_ladir/$laname $newdependency_libs" + # ... 
and its dependency_libs + tmp_libs= + for deplib in $dependency_libs; do + newdependency_libs="$deplib $newdependency_libs" + case $deplib in + -L*) func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result";; + *) func_resolve_sysroot "$deplib" ;; + esac + if $opt_preserve_dup_deps; then + case "$tmp_libs " in + *" $func_resolve_sysroot_result "*) + func_append specialdeplibs " $func_resolve_sysroot_result" ;; + esac + fi + func_append tmp_libs " $func_resolve_sysroot_result" + done + + if test no != "$link_all_deplibs"; then + # Add the search paths of all dependency libraries + for deplib in $dependency_libs; do + path= + case $deplib in + -L*) path=$deplib ;; + *.la) + func_resolve_sysroot "$deplib" + deplib=$func_resolve_sysroot_result + func_dirname "$deplib" "" "." + dir=$func_dirname_result + # We need an absolute path. + case $dir in + [\\/]* | [A-Za-z]:[\\/]*) absdir=$dir ;; + *) + absdir=`cd "$dir" && pwd` + if test -z "$absdir"; then + func_warning "cannot determine absolute directory name of '$dir'" + absdir=$dir + fi + ;; + esac + if $GREP "^installed=no" $deplib > /dev/null; then + case $host in + *-*-darwin*) + depdepl= + eval deplibrary_names=`$SED -n -e 's/^library_names=\(.*\)$/\1/p' $deplib` + if test -n "$deplibrary_names"; then + for tmp in $deplibrary_names; do + depdepl=$tmp + done + if test -f "$absdir/$objdir/$depdepl"; then + depdepl=$absdir/$objdir/$depdepl + darwin_install_name=`$OTOOL -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` + if test -z "$darwin_install_name"; then + darwin_install_name=`$OTOOL64 -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` + fi + func_append compiler_flags " $wl-dylib_file $wl$darwin_install_name:$depdepl" + func_append linker_flags " -dylib_file $darwin_install_name:$depdepl" + path= + fi + fi + ;; + *) + path=-L$absdir/$objdir + ;; + esac + else + eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` + test -z "$libdir" && \ + func_fatal_error "'$deplib' is not a valid libtool archive" + test "$absdir" != "$libdir" && \ + func_warning "'$deplib' seems to be moved" + + path=-L$absdir + fi + ;; + esac + case " $deplibs " in + *" $path "*) ;; + *) deplibs="$path $deplibs" ;; + esac + done + fi # link_all_deplibs != no + fi # linkmode = lib + done # for deplib in $libs + if test link = "$pass"; then + if test prog = "$linkmode"; then + compile_deplibs="$new_inherited_linker_flags $compile_deplibs" + finalize_deplibs="$new_inherited_linker_flags $finalize_deplibs" + else + compiler_flags="$compiler_flags "`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + fi + fi + dependency_libs=$newdependency_libs + if test dlpreopen = "$pass"; then + # Link the dlpreopened libraries before other libraries + for deplib in $save_deplibs; do + deplibs="$deplib $deplibs" + done + fi + if test dlopen != "$pass"; then + test conv = "$pass" || { + # Make sure lib_search_path contains only unique directories. 
+ lib_search_path= + for dir in $newlib_search_path; do + case "$lib_search_path " in + *" $dir "*) ;; + *) func_append lib_search_path " $dir" ;; + esac + done + newlib_search_path= + } + + if test prog,link = "$linkmode,$pass"; then + vars="compile_deplibs finalize_deplibs" + else + vars=deplibs + fi + for var in $vars dependency_libs; do + # Add libraries to $var in reverse order + eval tmp_libs=\"\$$var\" + new_libs= + for deplib in $tmp_libs; do + # FIXME: Pedantically, this is the right thing to do, so + # that some nasty dependency loop isn't accidentally + # broken: + #new_libs="$deplib $new_libs" + # Pragmatically, this seems to cause very few problems in + # practice: + case $deplib in + -L*) new_libs="$deplib $new_libs" ;; + -R*) ;; + *) + # And here is the reason: when a library appears more + # than once as an explicit dependence of a library, or + # is implicitly linked in more than once by the + # compiler, it is considered special, and multiple + # occurrences thereof are not removed. Compare this + # with having the same library being listed as a + # dependency of multiple other libraries: in this case, + # we know (pedantically, we assume) the library does not + # need to be listed more than once, so we keep only the + # last copy. This is not always right, but it is rare + # enough that we require users that really mean to play + # such unportable linking tricks to link the library + # using -Wl,-lname, so that libtool does not consider it + # for duplicate removal. + case " $specialdeplibs " in + *" $deplib "*) new_libs="$deplib $new_libs" ;; + *) + case " $new_libs " in + *" $deplib "*) ;; + *) new_libs="$deplib $new_libs" ;; + esac + ;; + esac + ;; + esac + done + tmp_libs= + for deplib in $new_libs; do + case $deplib in + -L*) + case " $tmp_libs " in + *" $deplib "*) ;; + *) func_append tmp_libs " $deplib" ;; + esac + ;; + *) func_append tmp_libs " $deplib" ;; + esac + done + eval $var=\"$tmp_libs\" + done # for var + fi + + # Add Sun CC postdeps if required: + test CXX = "$tagname" && { + case $host_os in + linux*) + case `$CC -V 2>&1 | $SED 5q` in + *Sun\ C*) # Sun C++ 5.9 + func_suncc_cstd_abi + + if test no != "$suncc_use_cstd_abi"; then + func_append postdeps ' -library=Cstd -library=Crun' + fi + ;; + esac + ;; + + solaris*) + func_cc_basename "$CC" + case $func_cc_basename_result in + CC* | sunCC*) + func_suncc_cstd_abi + + if test no != "$suncc_use_cstd_abi"; then + func_append postdeps ' -library=Cstd -library=Crun' + fi + ;; + esac + ;; + esac + } + + # Last step: remove runtime libs from dependency_libs + # (they stay in deplibs) + tmp_libs= + for i in $dependency_libs; do + case " $predeps $postdeps $compiler_lib_search_path " in + *" $i "*) + i= + ;; + esac + if test -n "$i"; then + func_append tmp_libs " $i" + fi + done + dependency_libs=$tmp_libs + done # for pass + if test prog = "$linkmode"; then + dlfiles=$newdlfiles + fi + if test prog = "$linkmode" || test lib = "$linkmode"; then + dlprefiles=$newdlprefiles + fi + + case $linkmode in + oldlib) + if test -n "$dlfiles$dlprefiles" || test no != "$dlself"; then + func_warning "'-dlopen' is ignored for archives" + fi + + case " $deplibs" in + *\ -l* | *\ -L*) + func_warning "'-l' and '-L' are ignored for archives" ;; + esac + + test -n "$rpath" && \ + func_warning "'-rpath' is ignored for archives" + + test -n "$xrpath" && \ + func_warning "'-R' is ignored for archives" + + test -n "$vinfo" && \ + func_warning "'-version-info/-version-number' is ignored for archives" + + test -n "$release" && \ + 
func_warning "'-release' is ignored for archives" + + test -n "$export_symbols$export_symbols_regex" && \ + func_warning "'-export-symbols' is ignored for archives" + + # Now set the variables for building old libraries. + build_libtool_libs=no + oldlibs=$output + func_append objs "$old_deplibs" + ;; + + lib) + # Make sure we only generate libraries of the form 'libNAME.la'. + case $outputname in + lib*) + func_stripname 'lib' '.la' "$outputname" + name=$func_stripname_result + eval shared_ext=\"$shrext_cmds\" + eval libname=\"$libname_spec\" + ;; + *) + test no = "$module" \ + && func_fatal_help "libtool library '$output' must begin with 'lib'" + + if test no != "$need_lib_prefix"; then + # Add the "lib" prefix for modules if required + func_stripname '' '.la' "$outputname" + name=$func_stripname_result + eval shared_ext=\"$shrext_cmds\" + eval libname=\"$libname_spec\" + else + func_stripname '' '.la' "$outputname" + libname=$func_stripname_result + fi + ;; + esac + + if test -n "$objs"; then + if test pass_all != "$deplibs_check_method"; then + func_fatal_error "cannot build libtool library '$output' from non-libtool objects on this host:$objs" + else + echo + $ECHO "*** Warning: Linking the shared library $output against the non-libtool" + $ECHO "*** objects $objs is not portable!" + func_append libobjs " $objs" + fi + fi + + test no = "$dlself" \ + || func_warning "'-dlopen self' is ignored for libtool libraries" + + set dummy $rpath + shift + test 1 -lt "$#" \ + && func_warning "ignoring multiple '-rpath's for a libtool library" + + install_libdir=$1 + + oldlibs= + if test -z "$rpath"; then + if test yes = "$build_libtool_libs"; then + # Building a libtool convenience library. + # Some compilers have problems with a '.al' extension so + # convenience libraries should have the same extension an + # archive normally would. + oldlibs="$output_objdir/$libname.$libext $oldlibs" + build_libtool_libs=convenience + build_old_libs=yes + fi + + test -n "$vinfo" && \ + func_warning "'-version-info/-version-number' is ignored for convenience libraries" + + test -n "$release" && \ + func_warning "'-release' is ignored for convenience libraries" + else + + # Parse the version information argument. + save_ifs=$IFS; IFS=: + set dummy $vinfo 0 0 0 + shift + IFS=$save_ifs + + test -n "$7" && \ + func_fatal_help "too many parameters to '-version-info'" + + # convert absolute version numbers to libtool ages + # this retains compatibility with .la files and attempts + # to make the code below a bit more comprehensible + + case $vinfo_number in + yes) + number_major=$1 + number_minor=$2 + number_revision=$3 + # + # There are really only two kinds -- those that + # use the current revision as the major version + # and those that subtract age and use age as + # a minor version. But, then there is irix + # that has an extra 1 added just for fun + # + case $version_type in + # correct linux to gnu/linux during the next big refactor + darwin|freebsd-elf|linux|midnightbsd-elf|osf|windows|none) + func_arith $number_major + $number_minor + current=$func_arith_result + age=$number_minor + revision=$number_revision + ;; + freebsd-aout|qnx|sunos) + current=$number_major + revision=$number_minor + age=0 + ;; + irix|nonstopux) + func_arith $number_major + $number_minor + current=$func_arith_result + age=$number_minor + revision=$number_minor + lt_irix_increment=no + ;; + esac + ;; + no) + current=$1 + revision=$2 + age=$3 + ;; + esac + + # Check that each of the things are valid numbers. 
+ case $current in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "CURRENT '$current' must be a nonnegative integer" + func_fatal_error "'$vinfo' is not valid version information" + ;; + esac + + case $revision in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "REVISION '$revision' must be a nonnegative integer" + func_fatal_error "'$vinfo' is not valid version information" + ;; + esac + + case $age in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "AGE '$age' must be a nonnegative integer" + func_fatal_error "'$vinfo' is not valid version information" + ;; + esac + + if test "$age" -gt "$current"; then + func_error "AGE '$age' is greater than the current interface number '$current'" + func_fatal_error "'$vinfo' is not valid version information" + fi + + # Calculate the version variables. + major= + versuffix= + verstring= + case $version_type in + none) ;; + + darwin) + # Like Linux, but with the current version available in + # verstring for coding it into the library header + func_arith $current - $age + major=.$func_arith_result + versuffix=$major.$age.$revision + # Darwin ld doesn't like 0 for these options... + func_arith $current + 1 + minor_current=$func_arith_result + xlcverstring="$wl-compatibility_version $wl$minor_current $wl-current_version $wl$minor_current.$revision" + verstring="-compatibility_version $minor_current -current_version $minor_current.$revision" + # On Darwin other compilers + case $CC in + nagfor*) + verstring="$wl-compatibility_version $wl$minor_current $wl-current_version $wl$minor_current.$revision" + ;; + *) + verstring="-compatibility_version $minor_current -current_version $minor_current.$revision" + ;; + esac + ;; + + freebsd-aout) + major=.$current + versuffix=.$current.$revision + ;; + + freebsd-elf | midnightbsd-elf) + func_arith $current - $age + major=.$func_arith_result + versuffix=$major.$age.$revision + ;; + + irix | nonstopux) + if test no = "$lt_irix_increment"; then + func_arith $current - $age + else + func_arith $current - $age + 1 + fi + major=$func_arith_result + + case $version_type in + nonstopux) verstring_prefix=nonstopux ;; + *) verstring_prefix=sgi ;; + esac + verstring=$verstring_prefix$major.$revision + + # Add in all the interfaces that we are compatible with. + loop=$revision + while test 0 -ne "$loop"; do + func_arith $revision - $loop + iface=$func_arith_result + func_arith $loop - 1 + loop=$func_arith_result + verstring=$verstring_prefix$major.$iface:$verstring + done + + # Before this point, $major must not contain '.'. + major=.$major + versuffix=$major.$revision + ;; + + linux) # correct to gnu/linux during the next big refactor + func_arith $current - $age + major=.$func_arith_result + versuffix=$major.$age.$revision + ;; + + osf) + func_arith $current - $age + major=.$func_arith_result + versuffix=.$current.$age.$revision + verstring=$current.$age.$revision + + # Add in all the interfaces that we are compatible with. + loop=$age + while test 0 -ne "$loop"; do + func_arith $current - $loop + iface=$func_arith_result + func_arith $loop - 1 + loop=$func_arith_result + verstring=$verstring:$iface.0 + done + + # Make executables depend on our current version. 
+ func_append verstring ":$current.0" + ;; + + qnx) + major=.$current + versuffix=.$current + ;; + + sco) + major=.$current + versuffix=.$current + ;; + + sunos) + major=.$current + versuffix=.$current.$revision + ;; + + windows) + # Use '-' rather than '.', since we only want one + # extension on DOS 8.3 file systems. + func_arith $current - $age + major=$func_arith_result + versuffix=-$major + ;; + + *) + func_fatal_configuration "unknown library version type '$version_type'" + ;; + esac + + # Clear the version info if we defaulted, and they specified a release. + if test -z "$vinfo" && test -n "$release"; then + major= + case $version_type in + darwin) + # we can't check for "0.0" in archive_cmds due to quoting + # problems, so we reset it completely + verstring= + ;; + *) + verstring=0.0 + ;; + esac + if test no = "$need_version"; then + versuffix= + else + versuffix=.0.0 + fi + fi + + # Remove version info from name if versioning should be avoided + if test yes,no = "$avoid_version,$need_version"; then + major= + versuffix= + verstring= + fi + + # Check to see if the archive will have undefined symbols. + if test yes = "$allow_undefined"; then + if test unsupported = "$allow_undefined_flag"; then + if test yes = "$build_old_libs"; then + func_warning "undefined symbols not allowed in $host shared libraries; building static only" + build_libtool_libs=no + else + func_fatal_error "can't build $host shared library unless -no-undefined is specified" + fi + fi + else + # Don't allow undefined symbols. + allow_undefined_flag=$no_undefined_flag + fi + + fi + + func_generate_dlsyms "$libname" "$libname" : + func_append libobjs " $symfileobj" + test " " = "$libobjs" && libobjs= + + if test relink != "$opt_mode"; then + # Remove our outputs, but don't remove object files since they + # may have been created when compiling PIC objects. + removelist= + tempremovelist=`$ECHO "$output_objdir/*"` + for p in $tempremovelist; do + case $p in + *.$objext | *.gcno) + ;; + $output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/$libname$release.*) + if test -n "$precious_files_regex"; then + if $ECHO "$p" | $EGREP -e "$precious_files_regex" >/dev/null 2>&1 + then + continue + fi + fi + func_append removelist " $p" + ;; + *) ;; + esac + done + test -n "$removelist" && \ + func_show_eval "${RM}r \$removelist" + fi + + # Now set the variables for building old libraries. + if test yes = "$build_old_libs" && test convenience != "$build_libtool_libs"; then + func_append oldlibs " $output_objdir/$libname.$libext" + + # Transform .lo files to .o files. + oldobjs="$objs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.$libext$/d; $lo2o" | $NL2SP` + fi + + # Eliminate all temporary directories. + #for path in $notinst_path; do + # lib_search_path=`$ECHO "$lib_search_path " | $SED "s% $path % %g"` + # deplibs=`$ECHO "$deplibs " | $SED "s% -L$path % %g"` + # dependency_libs=`$ECHO "$dependency_libs " | $SED "s% -L$path % %g"` + #done + + if test -n "$xrpath"; then + # If the user specified any rpath flags, then add them. 
+ temp_xrpath= + for libdir in $xrpath; do + func_replace_sysroot "$libdir" + func_append temp_xrpath " -R$func_replace_sysroot_result" + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + done + if test yes != "$hardcode_into_libs" || test yes = "$build_old_libs"; then + dependency_libs="$temp_xrpath $dependency_libs" + fi + fi + + # Make sure dlfiles contains only unique files that won't be dlpreopened + old_dlfiles=$dlfiles + dlfiles= + for lib in $old_dlfiles; do + case " $dlprefiles $dlfiles " in + *" $lib "*) ;; + *) func_append dlfiles " $lib" ;; + esac + done + + # Make sure dlprefiles contains only unique files + old_dlprefiles=$dlprefiles + dlprefiles= + for lib in $old_dlprefiles; do + case "$dlprefiles " in + *" $lib "*) ;; + *) func_append dlprefiles " $lib" ;; + esac + done + + if test yes = "$build_libtool_libs"; then + if test -n "$rpath"; then + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos* | *-cegcc* | *-*-haiku*) + # these systems don't actually have a c library (as such)! + ;; + *-*-rhapsody* | *-*-darwin1.[012]) + # Rhapsody C library is in the System framework + func_append deplibs " System.ltframework" + ;; + *-*-netbsd*) + # Don't link with libc until the a.out ld.so is fixed. + ;; + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly* | *-*-midnightbsd*) + # Do not include libc due to us having libc/libc_r. + ;; + *-*-sco3.2v5* | *-*-sco5v6*) + # Causes problems with __ctype + ;; + *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) + # Compiler inserts libc in the correct place for threads to work + ;; + *) + # Add libc to deplibs on all other systems if necessary. + if test yes = "$build_libtool_need_lc"; then + func_append deplibs " -lc" + fi + ;; + esac + fi + + # Transform deplibs into only deplibs that can be linked in shared. + name_save=$name + libname_save=$libname + release_save=$release + versuffix_save=$versuffix + major_save=$major + # I'm not sure if I'm treating the release correctly. I think + # release should show up in the -l (ie -lgmp5) so we don't want to + # add it in twice. Is that correct? + release= + versuffix= + major= + newdeplibs= + droppeddeps=no + case $deplibs_check_method in + pass_all) + # Don't check for shared/static. Everything works. + # This might be a little naive. We might want to check + # whether the library exists or not. But this is on + # osf3 & osf4 and I'm not really sure... Just + # implementing what was already the behavior. + newdeplibs=$deplibs + ;; + test_compile) + # This code stresses the "libraries are programs" paradigm to its + # limits. Maybe even breaks it. We compile a program, linking it + # against the deplibs as a proxy for the library. Then we can check + # whether they linked in statically or dynamically with ldd. + $opt_dry_run || $RM conftest.c + cat > conftest.c </dev/null` + $nocaseglob + else + potential_libs=`ls $i/$libnameglob[.-]* 2>/dev/null` + fi + for potent_lib in $potential_libs; do + # Follow soft links. + if ls -lLd "$potent_lib" 2>/dev/null | + $GREP " -> " >/dev/null; then + continue + fi + # The statement above tries to avoid entering an + # endless loop below, in case of cyclic links. + # We might still enter an endless loop, since a link + # loop can be closed while we follow links, + # but so what? 
+ potlib=$potent_lib + while test -h "$potlib" 2>/dev/null; do + potliblink=`ls -ld $potlib | $SED 's/.* -> //'` + case $potliblink in + [\\/]* | [A-Za-z]:[\\/]*) potlib=$potliblink;; + *) potlib=`$ECHO "$potlib" | $SED 's|[^/]*$||'`"$potliblink";; + esac + done + if eval $file_magic_cmd \"\$potlib\" 2>/dev/null | + $SED -e 10q | + $EGREP "$file_magic_regex" > /dev/null; then + func_append newdeplibs " $a_deplib" + a_deplib= + break 2 + fi + done + done + fi + if test -n "$a_deplib"; then + droppeddeps=yes + echo + $ECHO "*** Warning: linker path does not have real file for library $a_deplib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have" + echo "*** because I did check the linker path looking for a file starting" + if test -z "$potlib"; then + $ECHO "*** with $libname but no candidates were found. (...for file magic test)" + else + $ECHO "*** with $libname and none of the candidates passed a file format test" + $ECHO "*** using a file magic. Last file checked: $potlib" + fi + fi + ;; + *) + # Add a -L argument. + func_append newdeplibs " $a_deplib" + ;; + esac + done # Gone through all deplibs. + ;; + match_pattern*) + set dummy $deplibs_check_method; shift + match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` + for a_deplib in $deplibs; do + case $a_deplib in + -l*) + func_stripname -l '' "$a_deplib" + name=$func_stripname_result + if test yes = "$allow_libtool_libs_with_static_runtimes"; then + case " $predeps $postdeps " in + *" $a_deplib "*) + func_append newdeplibs " $a_deplib" + a_deplib= + ;; + esac + fi + if test -n "$a_deplib"; then + libname=`eval "\\$ECHO \"$libname_spec\""` + for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do + potential_libs=`ls $i/$libname[.-]* 2>/dev/null` + for potent_lib in $potential_libs; do + potlib=$potent_lib # see symlink-check above in file_magic test + if eval "\$ECHO \"$potent_lib\"" 2>/dev/null | $SED 10q | \ + $EGREP "$match_pattern_regex" > /dev/null; then + func_append newdeplibs " $a_deplib" + a_deplib= + break 2 + fi + done + done + fi + if test -n "$a_deplib"; then + droppeddeps=yes + echo + $ECHO "*** Warning: linker path does not have real file for library $a_deplib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have" + echo "*** because I did check the linker path looking for a file starting" + if test -z "$potlib"; then + $ECHO "*** with $libname but no candidates were found. (...for regex pattern test)" + else + $ECHO "*** with $libname and none of the candidates passed a file format test" + $ECHO "*** using a regex pattern. Last file checked: $potlib" + fi + fi + ;; + *) + # Add a -L argument. + func_append newdeplibs " $a_deplib" + ;; + esac + done # Gone through all deplibs. 
+ ;; + none | unknown | *) + newdeplibs= + tmp_deplibs=`$ECHO " $deplibs" | $SED 's/ -lc$//; s/ -[LR][^ ]*//g'` + if test yes = "$allow_libtool_libs_with_static_runtimes"; then + for i in $predeps $postdeps; do + # can't use Xsed below, because $i might contain '/' + tmp_deplibs=`$ECHO " $tmp_deplibs" | $SED "s|$i||"` + done + fi + case $tmp_deplibs in + *[!\ \ ]*) + echo + if test none = "$deplibs_check_method"; then + echo "*** Warning: inter-library dependencies are not supported in this platform." + else + echo "*** Warning: inter-library dependencies are not known to be supported." + fi + echo "*** All declared inter-library dependencies are being dropped." + droppeddeps=yes + ;; + esac + ;; + esac + versuffix=$versuffix_save + major=$major_save + release=$release_save + libname=$libname_save + name=$name_save + + case $host in + *-*-rhapsody* | *-*-darwin1.[012]) + # On Rhapsody replace the C library with the System framework + newdeplibs=`$ECHO " $newdeplibs" | $SED 's/ -lc / System.ltframework /'` + ;; + esac + + if test yes = "$droppeddeps"; then + if test yes = "$module"; then + echo + echo "*** Warning: libtool could not satisfy all declared inter-library" + $ECHO "*** dependencies of module $libname. Therefore, libtool will create" + echo "*** a static module, that should work as long as the dlopening" + echo "*** application is linked with the -dlopen flag." + if test -z "$global_symbol_pipe"; then + echo + echo "*** However, this would only work if libtool was able to extract symbol" + echo "*** lists from a program, using 'nm' or equivalent, but libtool could" + echo "*** not find such a program. So, this module is probably useless." + echo "*** 'nm' from GNU binutils and a full rebuild may help." + fi + if test no = "$build_old_libs"; then + oldlibs=$output_objdir/$libname.$libext + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + else + echo "*** The inter-library dependencies that have been dropped here will be" + echo "*** automatically added whenever a program is linked with this library" + echo "*** or is declared to -dlopen it." + + if test no = "$allow_undefined"; then + echo + echo "*** Since this library must not contain undefined symbols," + echo "*** because either the platform does not support them or" + echo "*** it was explicitly requested with -no-undefined," + echo "*** libtool will only create a static version of it." + if test no = "$build_old_libs"; then + oldlibs=$output_objdir/$libname.$libext + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + fi + fi + fi + # Done checking deplibs! 
+ deplibs=$newdeplibs + fi + # Time to change all our "foo.ltframework" stuff back to "-framework foo" + case $host in + *-*-darwin*) + newdeplibs=`$ECHO " $newdeplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + new_inherited_linker_flags=`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + deplibs=`$ECHO " $deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + ;; + esac + + # move library search paths that coincide with paths to not yet + # installed libraries to the beginning of the library search list + new_libs= + for path in $notinst_path; do + case " $new_libs " in + *" -L$path/$objdir "*) ;; + *) + case " $deplibs " in + *" -L$path/$objdir "*) + func_append new_libs " -L$path/$objdir" ;; + esac + ;; + esac + done + for deplib in $deplibs; do + case $deplib in + -L*) + case " $new_libs " in + *" $deplib "*) ;; + *) func_append new_libs " $deplib" ;; + esac + ;; + *) func_append new_libs " $deplib" ;; + esac + done + deplibs=$new_libs + + # All the library-specific variables (install_libdir is set above). + library_names= + old_library= + dlname= + + # Test again, we may have decided not to build it any more + if test yes = "$build_libtool_libs"; then + # Remove $wl instances when linking with ld. + # FIXME: should test the right _cmds variable. + case $archive_cmds in + *\$LD\ *) wl= ;; + esac + if test yes = "$hardcode_into_libs"; then + # Hardcode the library paths + hardcode_libdirs= + dep_rpath= + rpath=$finalize_rpath + test relink = "$opt_mode" || rpath=$compile_rpath$rpath + for libdir in $rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + func_replace_sysroot "$libdir" + libdir=$func_replace_sysroot_result + if test -z "$hardcode_libdirs"; then + hardcode_libdirs=$libdir + else + # Just accumulate the unique libdirs. + case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + func_append dep_rpath " $flag" + fi + elif test -n "$runpath_var"; then + case "$perm_rpath " in + *" $libdir "*) ;; + *) func_append perm_rpath " $libdir" ;; + esac + fi + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir=$hardcode_libdirs + eval "dep_rpath=\"$hardcode_libdir_flag_spec\"" + fi + if test -n "$runpath_var" && test -n "$perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $perm_rpath; do + func_append rpath "$dir:" + done + eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var" + fi + test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs" + fi + + shlibpath=$finalize_shlibpath + test relink = "$opt_mode" || shlibpath=$compile_shlibpath$shlibpath + if test -n "$shlibpath"; then + eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var" + fi + + # Get the real and link names of the library. 
+ eval shared_ext=\"$shrext_cmds\" + eval library_names=\"$library_names_spec\" + set dummy $library_names + shift + realname=$1 + shift + + if test -n "$soname_spec"; then + eval soname=\"$soname_spec\" + else + soname=$realname + fi + if test -z "$dlname"; then + dlname=$soname + fi + + lib=$output_objdir/$realname + linknames= + for link + do + func_append linknames " $link" + done + + # Use standard objects if they are pic + test -z "$pic_flag" && libobjs=`$ECHO "$libobjs" | $SP2NL | $SED "$lo2o" | $NL2SP` + test "X$libobjs" = "X " && libobjs= + + delfiles= + if test -n "$export_symbols" && test -n "$include_expsyms"; then + $opt_dry_run || cp "$export_symbols" "$output_objdir/$libname.uexp" + export_symbols=$output_objdir/$libname.uexp + func_append delfiles " $export_symbols" + fi + + orig_export_symbols= + case $host_os in + cygwin* | mingw* | cegcc*) + if test -n "$export_symbols" && test -z "$export_symbols_regex"; then + # exporting using user supplied symfile + func_dll_def_p "$export_symbols" || { + # and it's NOT already a .def file. Must figure out + # which of the given symbols are data symbols and tag + # them as such. So, trigger use of export_symbols_cmds. + # export_symbols gets reassigned inside the "prepare + # the list of exported symbols" if statement, so the + # include_expsyms logic still works. + orig_export_symbols=$export_symbols + export_symbols= + always_export_symbols=yes + } + fi + ;; + esac + + # Prepare the list of exported symbols + if test -z "$export_symbols"; then + if test yes = "$always_export_symbols" || test -n "$export_symbols_regex"; then + func_verbose "generating symbol list for '$libname.la'" + export_symbols=$output_objdir/$libname.exp + $opt_dry_run || $RM $export_symbols + cmds=$export_symbols_cmds + save_ifs=$IFS; IFS='~' + for cmd1 in $cmds; do + IFS=$save_ifs + # Take the normal branch if the nm_file_list_spec branch + # doesn't work or if tool conversion is not needed. + case $nm_file_list_spec~$to_tool_file_cmd in + *~func_convert_file_noop | *~func_convert_file_msys_to_w32 | ~*) + try_normal_branch=yes + eval cmd=\"$cmd1\" + func_len " $cmd" + len=$func_len_result + ;; + *) + try_normal_branch=no + ;; + esac + if test yes = "$try_normal_branch" \ + && { test "$len" -lt "$max_cmd_len" \ + || test "$max_cmd_len" -le -1; } + then + func_show_eval "$cmd" 'exit $?' + skipped_export=false + elif test -n "$nm_file_list_spec"; then + func_basename "$output" + output_la=$func_basename_result + save_libobjs=$libobjs + save_output=$output + output=$output_objdir/$output_la.nm + func_to_tool_file "$output" + libobjs=$nm_file_list_spec$func_to_tool_file_result + func_append delfiles " $output" + func_verbose "creating $NM input file list: $output" + for obj in $save_libobjs; do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" + done > "$output" + eval cmd=\"$cmd1\" + func_show_eval "$cmd" 'exit $?' + output=$save_output + libobjs=$save_libobjs + skipped_export=false + else + # The command line is too long to execute in one step. + func_verbose "using reloadable object file for export list..." + skipped_export=: + # Break out early, otherwise skipped_export may be + # set to false by a later but shorter cmd. 
+ break + fi + done + IFS=$save_ifs + if test -n "$export_symbols_regex" && test : != "$skipped_export"; then + func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' + func_show_eval '$MV "${export_symbols}T" "$export_symbols"' + fi + fi + fi + + if test -n "$export_symbols" && test -n "$include_expsyms"; then + tmp_export_symbols=$export_symbols + test -n "$orig_export_symbols" && tmp_export_symbols=$orig_export_symbols + $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"' + fi + + if test : != "$skipped_export" && test -n "$orig_export_symbols"; then + # The given exports_symbols file has to be filtered, so filter it. + func_verbose "filter symbol list for '$libname.la' to tag DATA exports" + # FIXME: $output_objdir/$libname.filter potentially contains lots of + # 's' commands, which not all seds can handle. GNU sed should be fine + # though. Also, the filter scales superlinearly with the number of + # global variables. join(1) would be nice here, but unfortunately + # isn't a blessed tool. + $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter + func_append delfiles " $export_symbols $output_objdir/$libname.filter" + export_symbols=$output_objdir/$libname.def + $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols + fi + + tmp_deplibs= + for test_deplib in $deplibs; do + case " $convenience " in + *" $test_deplib "*) ;; + *) + func_append tmp_deplibs " $test_deplib" + ;; + esac + done + deplibs=$tmp_deplibs + + if test -n "$convenience"; then + if test -n "$whole_archive_flag_spec" && + test yes = "$compiler_needs_object" && + test -z "$libobjs"; then + # extract the archives, so we have objects to list. + # TODO: could optimize this to just extract one archive. + whole_archive_flag_spec= + fi + if test -n "$whole_archive_flag_spec"; then + save_libobjs=$libobjs + eval libobjs=\"\$libobjs $whole_archive_flag_spec\" + test "X$libobjs" = "X " && libobjs= + else + gentop=$output_objdir/${outputname}x + func_append generated " $gentop" + + func_extract_archives $gentop $convenience + func_append libobjs " $func_extract_archives_result" + test "X$libobjs" = "X " && libobjs= + fi + fi + + if test yes = "$thread_safe" && test -n "$thread_safe_flag_spec"; then + eval flag=\"$thread_safe_flag_spec\" + func_append linker_flags " $flag" + fi + + # Make a backup of the uninstalled library when relinking + if test relink = "$opt_mode"; then + $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}U && $MV $realname ${realname}U)' || exit $? + fi + + # Do each of the archive commands. + if test yes = "$module" && test -n "$module_cmds"; then + if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then + eval test_cmds=\"$module_expsym_cmds\" + cmds=$module_expsym_cmds + else + eval test_cmds=\"$module_cmds\" + cmds=$module_cmds + fi + else + if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then + eval test_cmds=\"$archive_expsym_cmds\" + cmds=$archive_expsym_cmds + else + eval test_cmds=\"$archive_cmds\" + cmds=$archive_cmds + fi + fi + + if test : != "$skipped_export" && + func_len " $test_cmds" && + len=$func_len_result && + test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then + : + else + # The command line is too long to link in one step, link piecewise + # or, if using GNU ld and skipped_export is not :, use a linker + # script. 
+ + # Save the value of $output and $libobjs because we want to + # use them later. If we have whole_archive_flag_spec, we + # want to use save_libobjs as it was before + # whole_archive_flag_spec was expanded, because we can't + # assume the linker understands whole_archive_flag_spec. + # This may have to be revisited, in case too many + # convenience libraries get linked in and end up exceeding + # the spec. + if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then + save_libobjs=$libobjs + fi + save_output=$output + func_basename "$output" + output_la=$func_basename_result + + # Clear the reloadable object creation command queue and + # initialize k to one. + test_cmds= + concat_cmds= + objlist= + last_robj= + k=1 + + if test -n "$save_libobjs" && test : != "$skipped_export" && test yes = "$with_gnu_ld"; then + output=$output_objdir/$output_la.lnkscript + func_verbose "creating GNU ld script: $output" + echo 'INPUT (' > $output + for obj in $save_libobjs + do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" >> $output + done + echo ')' >> $output + func_append delfiles " $output" + func_to_tool_file "$output" + output=$func_to_tool_file_result + elif test -n "$save_libobjs" && test : != "$skipped_export" && test -n "$file_list_spec"; then + output=$output_objdir/$output_la.lnk + func_verbose "creating linker input file list: $output" + : > $output + set x $save_libobjs + shift + firstobj= + if test yes = "$compiler_needs_object"; then + firstobj="$1 " + shift + fi + for obj + do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" >> $output + done + func_append delfiles " $output" + func_to_tool_file "$output" + output=$firstobj\"$file_list_spec$func_to_tool_file_result\" + else + if test -n "$save_libobjs"; then + func_verbose "creating reloadable object files..." + output=$output_objdir/$output_la-$k.$objext + eval test_cmds=\"$reload_cmds\" + func_len " $test_cmds" + len0=$func_len_result + len=$len0 + + # Loop over the list of objects to be linked. + for obj in $save_libobjs + do + func_len " $obj" + func_arith $len + $func_len_result + len=$func_arith_result + if test -z "$objlist" || + test "$len" -lt "$max_cmd_len"; then + func_append objlist " $obj" + else + # The command $test_cmds is almost too long, add a + # command to the queue. + if test 1 -eq "$k"; then + # The first file doesn't have a previous command to add. + reload_objs=$objlist + eval concat_cmds=\"$reload_cmds\" + else + # All subsequent reloadable object files will link in + # the last one created. + reload_objs="$objlist $last_robj" + eval concat_cmds=\"\$concat_cmds~$reload_cmds~\$RM $last_robj\" + fi + last_robj=$output_objdir/$output_la-$k.$objext + func_arith $k + 1 + k=$func_arith_result + output=$output_objdir/$output_la-$k.$objext + objlist=" $obj" + func_len " $last_robj" + func_arith $len0 + $func_len_result + len=$func_arith_result + fi + done + # Handle the remaining objects by creating one last + # reloadable object file. All subsequent reloadable object + # files will link in the last one created. 
+ test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + reload_objs="$objlist $last_robj" + eval concat_cmds=\"\$concat_cmds$reload_cmds\" + if test -n "$last_robj"; then + eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\" + fi + func_append delfiles " $output" + + else + output= + fi + + ${skipped_export-false} && { + func_verbose "generating symbol list for '$libname.la'" + export_symbols=$output_objdir/$libname.exp + $opt_dry_run || $RM $export_symbols + libobjs=$output + # Append the command to create the export file. + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + eval concat_cmds=\"\$concat_cmds$export_symbols_cmds\" + if test -n "$last_robj"; then + eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\" + fi + } + + test -n "$save_libobjs" && + func_verbose "creating a temporary reloadable object file: $output" + + # Loop through the commands generated above and execute them. + save_ifs=$IFS; IFS='~' + for cmd in $concat_cmds; do + IFS=$save_ifs + $opt_quiet || { + func_quote_arg expand,pretty "$cmd" + eval "func_echo $func_quote_arg_result" + } + $opt_dry_run || eval "$cmd" || { + lt_exit=$? + + # Restore the uninstalled library and exit + if test relink = "$opt_mode"; then + ( cd "$output_objdir" && \ + $RM "${realname}T" && \ + $MV "${realname}U" "$realname" ) + fi + + exit $lt_exit + } + done + IFS=$save_ifs + + if test -n "$export_symbols_regex" && ${skipped_export-false}; then + func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' + func_show_eval '$MV "${export_symbols}T" "$export_symbols"' + fi + fi + + ${skipped_export-false} && { + if test -n "$export_symbols" && test -n "$include_expsyms"; then + tmp_export_symbols=$export_symbols + test -n "$orig_export_symbols" && tmp_export_symbols=$orig_export_symbols + $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"' + fi + + if test -n "$orig_export_symbols"; then + # The given exports_symbols file has to be filtered, so filter it. + func_verbose "filter symbol list for '$libname.la' to tag DATA exports" + # FIXME: $output_objdir/$libname.filter potentially contains lots of + # 's' commands, which not all seds can handle. GNU sed should be fine + # though. Also, the filter scales superlinearly with the number of + # global variables. join(1) would be nice here, but unfortunately + # isn't a blessed tool. + $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter + func_append delfiles " $export_symbols $output_objdir/$libname.filter" + export_symbols=$output_objdir/$libname.def + $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols + fi + } + + libobjs=$output + # Restore the value of output. + output=$save_output + + if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then + eval libobjs=\"\$libobjs $whole_archive_flag_spec\" + test "X$libobjs" = "X " && libobjs= + fi + # Expand the library linking commands again to reset the + # value of $libobjs for piecewise linking. + + # Do each of the archive commands. + if test yes = "$module" && test -n "$module_cmds"; then + if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then + cmds=$module_expsym_cmds + else + cmds=$module_cmds + fi + else + if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then + cmds=$archive_expsym_cmds + else + cmds=$archive_cmds + fi + fi + fi + + if test -n "$delfiles"; then + # Append the command to remove temporary files to $cmds. 
+ eval cmds=\"\$cmds~\$RM $delfiles\" + fi + + # Add any objects from preloaded convenience libraries + if test -n "$dlprefiles"; then + gentop=$output_objdir/${outputname}x + func_append generated " $gentop" + + func_extract_archives $gentop $dlprefiles + func_append libobjs " $func_extract_archives_result" + test "X$libobjs" = "X " && libobjs= + fi + + save_ifs=$IFS; IFS='~' + for cmd in $cmds; do + IFS=$sp$nl + eval cmd=\"$cmd\" + IFS=$save_ifs + $opt_quiet || { + func_quote_arg expand,pretty "$cmd" + eval "func_echo $func_quote_arg_result" + } + $opt_dry_run || eval "$cmd" || { + lt_exit=$? + + # Restore the uninstalled library and exit + if test relink = "$opt_mode"; then + ( cd "$output_objdir" && \ + $RM "${realname}T" && \ + $MV "${realname}U" "$realname" ) + fi + + exit $lt_exit + } + done + IFS=$save_ifs + + # Restore the uninstalled library and exit + if test relink = "$opt_mode"; then + $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}T && $MV $realname ${realname}T && $MV ${realname}U $realname)' || exit $? + + if test -n "$convenience"; then + if test -z "$whole_archive_flag_spec"; then + func_show_eval '${RM}r "$gentop"' + fi + fi + + exit $EXIT_SUCCESS + fi + + # Create links to the real library. + for linkname in $linknames; do + if test "$realname" != "$linkname"; then + func_show_eval '(cd "$output_objdir" && $RM "$linkname" && $LN_S "$realname" "$linkname")' 'exit $?' + fi + done + + # If -module or -export-dynamic was specified, set the dlname. + if test yes = "$module" || test yes = "$export_dynamic"; then + # On all known operating systems, these are identical. + dlname=$soname + fi + fi + ;; + + obj) + if test -n "$dlfiles$dlprefiles" || test no != "$dlself"; then + func_warning "'-dlopen' is ignored for objects" + fi + + case " $deplibs" in + *\ -l* | *\ -L*) + func_warning "'-l' and '-L' are ignored for objects" ;; + esac + + test -n "$rpath" && \ + func_warning "'-rpath' is ignored for objects" + + test -n "$xrpath" && \ + func_warning "'-R' is ignored for objects" + + test -n "$vinfo" && \ + func_warning "'-version-info' is ignored for objects" + + test -n "$release" && \ + func_warning "'-release' is ignored for objects" + + case $output in + *.lo) + test -n "$objs$old_deplibs" && \ + func_fatal_error "cannot build library object '$output' from non-libtool objects" + + libobj=$output + func_lo2o "$libobj" + obj=$func_lo2o_result + ;; + *) + libobj= + obj=$output + ;; + esac + + # Delete the old objects. + $opt_dry_run || $RM $obj $libobj + + # Objects from convenience libraries. This assumes + # single-version convenience libraries. Whenever we create + # different ones for PIC/non-PIC, this we'll have to duplicate + # the extraction. + reload_conv_objs= + gentop= + # if reload_cmds runs $LD directly, get rid of -Wl from + # whole_archive_flag_spec and hope we can get by with turning comma + # into space. 
+ case $reload_cmds in + *\$LD[\ \$]*) wl= ;; + esac + if test -n "$convenience"; then + if test -n "$whole_archive_flag_spec"; then + eval tmp_whole_archive_flags=\"$whole_archive_flag_spec\" + test -n "$wl" || tmp_whole_archive_flags=`$ECHO "$tmp_whole_archive_flags" | $SED 's|,| |g'` + reload_conv_objs=$reload_objs\ $tmp_whole_archive_flags + else + gentop=$output_objdir/${obj}x + func_append generated " $gentop" + + func_extract_archives $gentop $convenience + reload_conv_objs="$reload_objs $func_extract_archives_result" + fi + fi + + # If we're not building shared, we need to use non_pic_objs + test yes = "$build_libtool_libs" || libobjs=$non_pic_objects + + # Create the old-style object. + reload_objs=$objs$old_deplibs' '`$ECHO "$libobjs" | $SP2NL | $SED "/\.$libext$/d; /\.lib$/d; $lo2o" | $NL2SP`' '$reload_conv_objs + + output=$obj + func_execute_cmds "$reload_cmds" 'exit $?' + + # Exit if we aren't doing a library object file. + if test -z "$libobj"; then + if test -n "$gentop"; then + func_show_eval '${RM}r "$gentop"' + fi + + exit $EXIT_SUCCESS + fi + + test yes = "$build_libtool_libs" || { + if test -n "$gentop"; then + func_show_eval '${RM}r "$gentop"' + fi + + # Create an invalid libtool object if no PIC, so that we don't + # accidentally link it into a program. + # $show "echo timestamp > $libobj" + # $opt_dry_run || eval "echo timestamp > $libobj" || exit $? + exit $EXIT_SUCCESS + } + + if test -n "$pic_flag" || test default != "$pic_mode"; then + # Only do commands if we really have different PIC objects. + reload_objs="$libobjs $reload_conv_objs" + output=$libobj + func_execute_cmds "$reload_cmds" 'exit $?' + fi + + if test -n "$gentop"; then + func_show_eval '${RM}r "$gentop"' + fi + + exit $EXIT_SUCCESS + ;; + + prog) + case $host in + *cygwin*) func_stripname '' '.exe' "$output" + output=$func_stripname_result.exe;; + esac + test -n "$vinfo" && \ + func_warning "'-version-info' is ignored for programs" + + test -n "$release" && \ + func_warning "'-release' is ignored for programs" + + $preload \ + && test unknown,unknown,unknown = "$dlopen_support,$dlopen_self,$dlopen_self_static" \ + && func_warning "'LT_INIT([dlopen])' not used. Assuming no dlopen support." + + case $host in + *-*-rhapsody* | *-*-darwin1.[012]) + # On Rhapsody replace the C library is the System framework + compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's/ -lc / System.ltframework /'` + finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's/ -lc / System.ltframework /'` + ;; + esac + + case $host in + *-*-darwin*) + # Don't allow lazy linking, it breaks C++ global constructors + # But is supposedly fixed on 10.4 or later (yay!). 
+ if test CXX = "$tagname"; then + case ${MACOSX_DEPLOYMENT_TARGET-10.0} in + 10.[0123]) + func_append compile_command " $wl-bind_at_load" + func_append finalize_command " $wl-bind_at_load" + ;; + esac + fi + # Time to change all our "foo.ltframework" stuff back to "-framework foo" + compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + ;; + esac + + + # move library search paths that coincide with paths to not yet + # installed libraries to the beginning of the library search list + new_libs= + for path in $notinst_path; do + case " $new_libs " in + *" -L$path/$objdir "*) ;; + *) + case " $compile_deplibs " in + *" -L$path/$objdir "*) + func_append new_libs " -L$path/$objdir" ;; + esac + ;; + esac + done + for deplib in $compile_deplibs; do + case $deplib in + -L*) + case " $new_libs " in + *" $deplib "*) ;; + *) func_append new_libs " $deplib" ;; + esac + ;; + *) func_append new_libs " $deplib" ;; + esac + done + compile_deplibs=$new_libs + + + func_append compile_command " $compile_deplibs" + func_append finalize_command " $finalize_deplibs" + + if test -n "$rpath$xrpath"; then + # If the user specified any rpath flags, then add them. + for libdir in $rpath $xrpath; do + # This is the magic to use -rpath. + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + done + fi + + # Now hardcode the library paths + rpath= + hardcode_libdirs= + for libdir in $compile_rpath $finalize_rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs=$libdir + else + # Just accumulate the unique libdirs. + case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + func_append rpath " $flag" + fi + elif test -n "$runpath_var"; then + case "$perm_rpath " in + *" $libdir "*) ;; + *) func_append perm_rpath " $libdir" ;; + esac + fi + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + testbindir=`$ECHO "$libdir" | $SED -e 's*/lib$*/bin*'` + case :$dllsearchpath: in + *":$libdir:"*) ;; + ::) dllsearchpath=$libdir;; + *) func_append dllsearchpath ":$libdir";; + esac + case :$dllsearchpath: in + *":$testbindir:"*) ;; + ::) dllsearchpath=$testbindir;; + *) func_append dllsearchpath ":$testbindir";; + esac + ;; + esac + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir=$hardcode_libdirs + eval rpath=\" $hardcode_libdir_flag_spec\" + fi + compile_rpath=$rpath + + rpath= + hardcode_libdirs= + for libdir in $finalize_rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs=$libdir + else + # Just accumulate the unique libdirs. 
+ case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + func_append rpath " $flag" + fi + elif test -n "$runpath_var"; then + case "$finalize_perm_rpath " in + *" $libdir "*) ;; + *) func_append finalize_perm_rpath " $libdir" ;; + esac + fi + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir=$hardcode_libdirs + eval rpath=\" $hardcode_libdir_flag_spec\" + fi + finalize_rpath=$rpath + + if test -n "$libobjs" && test yes = "$build_old_libs"; then + # Transform all the library objects into standard objects. + compile_command=`$ECHO "$compile_command" | $SP2NL | $SED "$lo2o" | $NL2SP` + finalize_command=`$ECHO "$finalize_command" | $SP2NL | $SED "$lo2o" | $NL2SP` + fi + + func_generate_dlsyms "$outputname" "@PROGRAM@" false + + # template prelinking step + if test -n "$prelink_cmds"; then + func_execute_cmds "$prelink_cmds" 'exit $?' + fi + + wrappers_required=: + case $host in + *cegcc* | *mingw32ce*) + # Disable wrappers for cegcc and mingw32ce hosts, we are cross compiling anyway. + wrappers_required=false + ;; + *cygwin* | *mingw* ) + test yes = "$build_libtool_libs" || wrappers_required=false + ;; + *) + if test no = "$need_relink" || test yes != "$build_libtool_libs"; then + wrappers_required=false + fi + ;; + esac + $wrappers_required || { + # Replace the output file specification. + compile_command=`$ECHO "$compile_command" | $SED 's%@OUTPUT@%'"$output"'%g'` + link_command=$compile_command$compile_rpath + + # We have no uninstalled library dependencies, so finalize right now. + exit_status=0 + func_show_eval "$link_command" 'exit_status=$?' + + if test -n "$postlink_cmds"; then + func_to_tool_file "$output" + postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` + func_execute_cmds "$postlink_cmds" 'exit $?' + fi + + # Delete the generated files. + if test -f "$output_objdir/${outputname}S.$objext"; then + func_show_eval '$RM "$output_objdir/${outputname}S.$objext"' + fi + + exit $exit_status + } + + if test -n "$compile_shlibpath$finalize_shlibpath"; then + compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command" + fi + if test -n "$finalize_shlibpath"; then + finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command" + fi + + compile_var= + finalize_var= + if test -n "$runpath_var"; then + if test -n "$perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $perm_rpath; do + func_append rpath "$dir:" + done + compile_var="$runpath_var=\"$rpath\$$runpath_var\" " + fi + if test -n "$finalize_perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $finalize_perm_rpath; do + func_append rpath "$dir:" + done + finalize_var="$runpath_var=\"$rpath\$$runpath_var\" " + fi + fi + + if test yes = "$no_install"; then + # We don't need to create a wrapper script. + link_command=$compile_var$compile_command$compile_rpath + # Replace the output file specification. + link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output"'%g'` + # Delete the old output file. 
+ $opt_dry_run || $RM $output + # Link the executable and exit + func_show_eval "$link_command" 'exit $?' + + if test -n "$postlink_cmds"; then + func_to_tool_file "$output" + postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` + func_execute_cmds "$postlink_cmds" 'exit $?' + fi + + exit $EXIT_SUCCESS + fi + + case $hardcode_action,$fast_install in + relink,*) + # Fast installation is not supported + link_command=$compile_var$compile_command$compile_rpath + relink_command=$finalize_var$finalize_command$finalize_rpath + + func_warning "this platform does not like uninstalled shared libraries" + func_warning "'$output' will be relinked during installation" + ;; + *,yes) + link_command=$finalize_var$compile_command$finalize_rpath + relink_command=`$ECHO "$compile_var$compile_command$compile_rpath" | $SED 's%@OUTPUT@%\$progdir/\$file%g'` + ;; + *,no) + link_command=$compile_var$compile_command$compile_rpath + relink_command=$finalize_var$finalize_command$finalize_rpath + ;; + *,needless) + link_command=$finalize_var$compile_command$finalize_rpath + relink_command= + ;; + esac + + # Replace the output file specification. + link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'` + + # Delete the old output files. + $opt_dry_run || $RM $output $output_objdir/$outputname $output_objdir/lt-$outputname + + func_show_eval "$link_command" 'exit $?' + + if test -n "$postlink_cmds"; then + func_to_tool_file "$output_objdir/$outputname" + postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` + func_execute_cmds "$postlink_cmds" 'exit $?' + fi + + # Now create the wrapper script. + func_verbose "creating $output" + + # Quote the relink command for shipping. + if test -n "$relink_command"; then + # Preserve any variables that may affect compiler behavior + for var in $variables_saved_for_relink; do + if eval test -z \"\${$var+set}\"; then + relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" + elif eval var_value=\$$var; test -z "$var_value"; then + relink_command="$var=; export $var; $relink_command" + else + func_quote_arg pretty "$var_value" + relink_command="$var=$func_quote_arg_result; export $var; $relink_command" + fi + done + func_quote eval cd "`pwd`" + func_quote_arg pretty,unquoted "($func_quote_result; $relink_command)" + relink_command=$func_quote_arg_unquoted_result + fi + + # Only actually do things if not in dry run mode. + $opt_dry_run || { + # win32 will think the script is a binary if it has + # a .exe suffix, so we strip it off here. + case $output in + *.exe) func_stripname '' '.exe' "$output" + output=$func_stripname_result ;; + esac + # test for cygwin because mv fails w/o .exe extensions + case $host in + *cygwin*) + exeext=.exe + func_stripname '' '.exe' "$outputname" + outputname=$func_stripname_result ;; + *) exeext= ;; + esac + case $host in + *cygwin* | *mingw* ) + func_dirname_and_basename "$output" "" "." 
+ output_name=$func_basename_result + output_path=$func_dirname_result + cwrappersource=$output_path/$objdir/lt-$output_name.c + cwrapper=$output_path/$output_name.exe + $RM $cwrappersource $cwrapper + trap "$RM $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15 + + func_emit_cwrapperexe_src > $cwrappersource + + # The wrapper executable is built using the $host compiler, + # because it contains $host paths and files. If cross- + # compiling, it, like the target executable, must be + # executed on the $host or under an emulation environment. + $opt_dry_run || { + $LTCC $LTCFLAGS -o $cwrapper $cwrappersource + $STRIP $cwrapper + } + + # Now, create the wrapper script for func_source use: + func_ltwrapper_scriptname $cwrapper + $RM $func_ltwrapper_scriptname_result + trap "$RM $func_ltwrapper_scriptname_result; exit $EXIT_FAILURE" 1 2 15 + $opt_dry_run || { + # note: this script will not be executed, so do not chmod. + if test "x$build" = "x$host"; then + $cwrapper --lt-dump-script > $func_ltwrapper_scriptname_result + else + func_emit_wrapper no > $func_ltwrapper_scriptname_result + fi + } + ;; + * ) + $RM $output + trap "$RM $output; exit $EXIT_FAILURE" 1 2 15 + + func_emit_wrapper no > $output + chmod +x $output + ;; + esac + } + exit $EXIT_SUCCESS + ;; + esac + + # See if we need to build an old-fashioned archive. + for oldlib in $oldlibs; do + + case $build_libtool_libs in + convenience) + oldobjs="$libobjs_save $symfileobj" + addlibs=$convenience + build_libtool_libs=no + ;; + module) + oldobjs=$libobjs_save + addlibs=$old_convenience + build_libtool_libs=no + ;; + *) + oldobjs="$old_deplibs $non_pic_objects" + $preload && test -f "$symfileobj" \ + && func_append oldobjs " $symfileobj" + addlibs=$old_convenience + ;; + esac + + if test -n "$addlibs"; then + gentop=$output_objdir/${outputname}x + func_append generated " $gentop" + + func_extract_archives $gentop $addlibs + func_append oldobjs " $func_extract_archives_result" + fi + + # Do each command in the archive commands. + if test -n "$old_archive_from_new_cmds" && test yes = "$build_libtool_libs"; then + cmds=$old_archive_from_new_cmds + else + + # Add any objects from preloaded convenience libraries + if test -n "$dlprefiles"; then + gentop=$output_objdir/${outputname}x + func_append generated " $gentop" + + func_extract_archives $gentop $dlprefiles + func_append oldobjs " $func_extract_archives_result" + fi + + # POSIX demands no paths to be encoded in archives. We have + # to avoid creating archives with duplicate basenames if we + # might have to extract them afterwards, e.g., when creating a + # static archive out of a convenience library, or when linking + # the entirety of a libtool archive into another (currently + # not supported by libtool). + if (for obj in $oldobjs + do + func_basename "$obj" + $ECHO "$func_basename_result" + done | sort | sort -uc >/dev/null 2>&1); then + : + else + echo "copying selected object files to avoid basename conflicts..." + gentop=$output_objdir/${outputname}x + func_append generated " $gentop" + func_mkdir_p "$gentop" + save_oldobjs=$oldobjs + oldobjs= + counter=1 + for obj in $save_oldobjs + do + func_basename "$obj" + objbase=$func_basename_result + case " $oldobjs " in + " ") oldobjs=$obj ;; + *[\ /]"$objbase "*) + while :; do + # Make sure we don't pick an alternate name that also + # overlaps. + newobj=lt$counter-$objbase + func_arith $counter + 1 + counter=$func_arith_result + case " $oldobjs " in + *[\ /]"$newobj "*) ;; + *) if test ! 
-f "$gentop/$newobj"; then break; fi ;; + esac + done + func_show_eval "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj" + func_append oldobjs " $gentop/$newobj" + ;; + *) func_append oldobjs " $obj" ;; + esac + done + fi + func_to_tool_file "$oldlib" func_convert_file_msys_to_w32 + tool_oldlib=$func_to_tool_file_result + eval cmds=\"$old_archive_cmds\" + + func_len " $cmds" + len=$func_len_result + if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then + cmds=$old_archive_cmds + elif test -n "$archiver_list_spec"; then + func_verbose "using command file archive linking..." + for obj in $oldobjs + do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" + done > $output_objdir/$libname.libcmd + func_to_tool_file "$output_objdir/$libname.libcmd" + oldobjs=" $archiver_list_spec$func_to_tool_file_result" + cmds=$old_archive_cmds + else + # the command line is too long to link in one step, link in parts + func_verbose "using piecewise archive linking..." + save_RANLIB=$RANLIB + RANLIB=: + objlist= + concat_cmds= + save_oldobjs=$oldobjs + oldobjs= + # Is there a better way of finding the last object in the list? + for obj in $save_oldobjs + do + last_oldobj=$obj + done + eval test_cmds=\"$old_archive_cmds\" + func_len " $test_cmds" + len0=$func_len_result + len=$len0 + for obj in $save_oldobjs + do + func_len " $obj" + func_arith $len + $func_len_result + len=$func_arith_result + func_append objlist " $obj" + if test "$len" -lt "$max_cmd_len"; then + : + else + # the above command should be used before it gets too long + oldobjs=$objlist + if test "$obj" = "$last_oldobj"; then + RANLIB=$save_RANLIB + fi + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + eval concat_cmds=\"\$concat_cmds$old_archive_cmds\" + objlist= + len=$len0 + fi + done + RANLIB=$save_RANLIB + oldobjs=$objlist + if test -z "$oldobjs"; then + eval cmds=\"\$concat_cmds\" + else + eval cmds=\"\$concat_cmds~\$old_archive_cmds\" + fi + fi + fi + func_execute_cmds "$cmds" 'exit $?' + done + + test -n "$generated" && \ + func_show_eval "${RM}r$generated" + + # Now create the libtool archive. + case $output in + *.la) + old_library= + test yes = "$build_old_libs" && old_library=$libname.$libext + func_verbose "creating $output" + + # Preserve any variables that may affect compiler behavior + for var in $variables_saved_for_relink; do + if eval test -z \"\${$var+set}\"; then + relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" + elif eval var_value=\$$var; test -z "$var_value"; then + relink_command="$var=; export $var; $relink_command" + else + func_quote_arg pretty,unquoted "$var_value" + relink_command="$var=$func_quote_arg_unquoted_result; export $var; $relink_command" + fi + done + # Quote the link command for shipping. + func_quote eval cd "`pwd`" + relink_command="($func_quote_result; $SHELL \"$progpath\" $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)" + func_quote_arg pretty,unquoted "$relink_command" + relink_command=$func_quote_arg_unquoted_result + if test yes = "$hardcode_automatic"; then + relink_command= + fi + + # Only create the output if not a dry run. 
+ $opt_dry_run || { + for installed in no yes; do + if test yes = "$installed"; then + if test -z "$install_libdir"; then + break + fi + output=$output_objdir/${outputname}i + # Replace all uninstalled libtool libraries with the installed ones + newdependency_libs= + for deplib in $dependency_libs; do + case $deplib in + *.la) + func_basename "$deplib" + name=$func_basename_result + func_resolve_sysroot "$deplib" + eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $func_resolve_sysroot_result` + test -z "$libdir" && \ + func_fatal_error "'$deplib' is not a valid libtool archive" + func_append newdependency_libs " ${lt_sysroot:+=}$libdir/$name" + ;; + -L*) + func_stripname -L '' "$deplib" + func_replace_sysroot "$func_stripname_result" + func_append newdependency_libs " -L$func_replace_sysroot_result" + ;; + -R*) + func_stripname -R '' "$deplib" + func_replace_sysroot "$func_stripname_result" + func_append newdependency_libs " -R$func_replace_sysroot_result" + ;; + *) func_append newdependency_libs " $deplib" ;; + esac + done + dependency_libs=$newdependency_libs + newdlfiles= + + for lib in $dlfiles; do + case $lib in + *.la) + func_basename "$lib" + name=$func_basename_result + eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $lib` + test -z "$libdir" && \ + func_fatal_error "'$lib' is not a valid libtool archive" + func_append newdlfiles " ${lt_sysroot:+=}$libdir/$name" + ;; + *) func_append newdlfiles " $lib" ;; + esac + done + dlfiles=$newdlfiles + newdlprefiles= + for lib in $dlprefiles; do + case $lib in + *.la) + # Only pass preopened files to the pseudo-archive (for + # eventual linking with the app. that links it) if we + # didn't already link the preopened objects directly into + # the library: + func_basename "$lib" + name=$func_basename_result + eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $lib` + test -z "$libdir" && \ + func_fatal_error "'$lib' is not a valid libtool archive" + func_append newdlprefiles " ${lt_sysroot:+=}$libdir/$name" + ;; + esac + done + dlprefiles=$newdlprefiles + else + newdlfiles= + for lib in $dlfiles; do + case $lib in + [\\/]* | [A-Za-z]:[\\/]*) abs=$lib ;; + *) abs=`pwd`"/$lib" ;; + esac + func_append newdlfiles " $abs" + done + dlfiles=$newdlfiles + newdlprefiles= + for lib in $dlprefiles; do + case $lib in + [\\/]* | [A-Za-z]:[\\/]*) abs=$lib ;; + *) abs=`pwd`"/$lib" ;; + esac + func_append newdlprefiles " $abs" + done + dlprefiles=$newdlprefiles + fi + $RM $output + # place dlname in correct position for cygwin + # In fact, it would be nice if we could use this code for all target + # systems that can't hard-code library paths into their executables + # and that have no shared library path variable independent of PATH, + # but it turns out we can't easily determine that from inspecting + # libtool variables, so we have to hard-code the OSs to which it + # applies here; at the moment, that means platforms that use the PE + # object format with DLL files. See the long comment at the top of + # tests/bindir.at for full details. + tdlname=$dlname + case $host,$output,$installed,$module,$dlname in + *cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll | *cegcc*,*lai,yes,no,*.dll) + # If a -bindir argument was supplied, place the dll there. + if test -n "$bindir"; then + func_relative_path "$install_libdir" "$bindir" + tdlname=$func_relative_path_result/$dlname + else + # Otherwise fall back on heuristic. 
+ tdlname=../bin/$dlname + fi + ;; + esac + $ECHO > $output "\ +# $outputname - a libtool library file +# Generated by $PROGRAM (GNU $PACKAGE) $VERSION +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# The name that we can dlopen(3). +dlname='$tdlname' + +# Names of this library. +library_names='$library_names' + +# The name of the static archive. +old_library='$old_library' + +# Linker flags that cannot go in dependency_libs. +inherited_linker_flags='$new_inherited_linker_flags' + +# Libraries that this one depends upon. +dependency_libs='$dependency_libs' + +# Names of additional weak libraries provided by this library +weak_library_names='$weak_libs' + +# Version information for $libname. +current=$current +age=$age +revision=$revision + +# Is this an already installed library? +installed=$installed + +# Should we warn about portability when linking against -modules? +shouldnotlink=$module + +# Files to dlopen/dlpreopen +dlopen='$dlfiles' +dlpreopen='$dlprefiles' + +# Directory that this library needs to be installed in: +libdir='$install_libdir'" + if test no,yes = "$installed,$need_relink"; then + $ECHO >> $output "\ +relink_command=\"$relink_command\"" + fi + done + } + + # Do a symbolic link so that the libtool archive can be found in + # LD_LIBRARY_PATH before the program is installed. + func_show_eval '( cd "$output_objdir" && $RM "$outputname" && $LN_S "../$outputname" "$outputname" )' 'exit $?' + ;; + esac + exit $EXIT_SUCCESS +} + +if test link = "$opt_mode" || test relink = "$opt_mode"; then + func_mode_link ${1+"$@"} +fi + + +# func_mode_uninstall arg... +func_mode_uninstall () +{ + $debug_cmd + + RM=$nonopt + files= + rmforce=false + exit_status=0 + + # This variable tells wrapper scripts just to set variables rather + # than running their programs. + libtool_install_magic=$magic + + for arg + do + case $arg in + -f) func_append RM " $arg"; rmforce=: ;; + -*) func_append RM " $arg" ;; + *) func_append files " $arg" ;; + esac + done + + test -z "$RM" && \ + func_fatal_help "you must specify an RM program" + + rmdirs= + + for file in $files; do + func_dirname "$file" "" "." + dir=$func_dirname_result + if test . = "$dir"; then + odir=$objdir + else + odir=$dir/$objdir + fi + func_basename "$file" + name=$func_basename_result + test uninstall = "$opt_mode" && odir=$dir + + # Remember odir for removal later, being careful to avoid duplicates + if test clean = "$opt_mode"; then + case " $rmdirs " in + *" $odir "*) ;; + *) func_append rmdirs " $odir" ;; + esac + fi + + # Don't error if the file doesn't exist and rm -f was used. + if { test -L "$file"; } >/dev/null 2>&1 || + { test -h "$file"; } >/dev/null 2>&1 || + test -f "$file"; then + : + elif test -d "$file"; then + exit_status=1 + continue + elif $rmforce; then + continue + fi + + rmfiles=$file + + case $name in + *.la) + # Possibly a libtool archive, so verify it. + if func_lalib_p "$file"; then + func_source $dir/$name + + # Delete the libtool libraries and symlinks. + for n in $library_names; do + func_append rmfiles " $odir/$n" + done + test -n "$old_library" && func_append rmfiles " $odir/$old_library" + + case $opt_mode in + clean) + case " $library_names " in + *" $dlname "*) ;; + *) test -n "$dlname" && func_append rmfiles " $odir/$dlname" ;; + esac + test -n "$libdir" && func_append rmfiles " $odir/$name $odir/${name}i" + ;; + uninstall) + if test -n "$library_names"; then + # Do each command in the postuninstall commands. 
+ func_execute_cmds "$postuninstall_cmds" '$rmforce || exit_status=1' + fi + + if test -n "$old_library"; then + # Do each command in the old_postuninstall commands. + func_execute_cmds "$old_postuninstall_cmds" '$rmforce || exit_status=1' + fi + # FIXME: should reinstall the best remaining shared library. + ;; + esac + fi + ;; + + *.lo) + # Possibly a libtool object, so verify it. + if func_lalib_p "$file"; then + + # Read the .lo file + func_source $dir/$name + + # Add PIC object to the list of files to remove. + if test -n "$pic_object" && test none != "$pic_object"; then + func_append rmfiles " $dir/$pic_object" + fi + + # Add non-PIC object to the list of files to remove. + if test -n "$non_pic_object" && test none != "$non_pic_object"; then + func_append rmfiles " $dir/$non_pic_object" + fi + fi + ;; + + *) + if test clean = "$opt_mode"; then + noexename=$name + case $file in + *.exe) + func_stripname '' '.exe' "$file" + file=$func_stripname_result + func_stripname '' '.exe' "$name" + noexename=$func_stripname_result + # $file with .exe has already been added to rmfiles, + # add $file without .exe + func_append rmfiles " $file" + ;; + esac + # Do a test to see if this is a libtool program. + if func_ltwrapper_p "$file"; then + if func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + relink_command= + func_source $func_ltwrapper_scriptname_result + func_append rmfiles " $func_ltwrapper_scriptname_result" + else + relink_command= + func_source $dir/$noexename + fi + + # note $name still contains .exe if it was in $file originally + # as does the version of $file that was added into $rmfiles + func_append rmfiles " $odir/$name $odir/${name}S.$objext" + if test yes = "$fast_install" && test -n "$relink_command"; then + func_append rmfiles " $odir/lt-$name" + fi + if test "X$noexename" != "X$name"; then + func_append rmfiles " $odir/lt-$noexename.c" + fi + fi + fi + ;; + esac + func_show_eval "$RM $rmfiles" 'exit_status=1' + done + + # Try to remove the $objdir's in the directories where we deleted files + for dir in $rmdirs; do + if test -d "$dir"; then + func_show_eval "rmdir $dir >/dev/null 2>&1" + fi + done + + exit $exit_status +} + +if test uninstall = "$opt_mode" || test clean = "$opt_mode"; then + func_mode_uninstall ${1+"$@"} +fi + +test -z "$opt_mode" && { + help=$generic_help + func_fatal_help "you must specify a MODE" +} + +test -z "$exec_cmd" && \ + func_fatal_help "invalid operation mode '$opt_mode'" + +if test -n "$exec_cmd"; then + eval exec "$exec_cmd" + exit $EXIT_FAILURE +fi + +exit $exit_status + + +# The TAGs below are defined such that we never get into a situation +# where we disable both kinds of libraries. Given conflicting +# choices, we go for a static library, that is the most portable, +# since we can't tell whether shared libraries were disabled because +# the user asked for that or because the platform doesn't support +# them. This is particularly important on AIX, because we don't +# support having both static and shared libraries enabled at the same +# time on that platform, so we default to a shared-only configuration. +# If a disable-shared tag is given, we'll fallback to a static-only +# configuration. But we'll never go from static-only to shared-only. 
+ +# ### BEGIN LIBTOOL TAG CONFIG: disable-shared +build_libtool_libs=no +build_old_libs=yes +# ### END LIBTOOL TAG CONFIG: disable-shared + +# ### BEGIN LIBTOOL TAG CONFIG: disable-static +build_old_libs=`case $build_libtool_libs in yes) echo no;; *) echo yes;; esac` +# ### END LIBTOOL TAG CONFIG: disable-static + +# Local Variables: +# mode:shell-script +# sh-indentation:2 +# End: diff --git a/pkg/enet/enet/list.c b/pkg/enet/enet/list.c new file mode 100644 index 000000000..1c1a8dfaa --- /dev/null +++ b/pkg/enet/enet/list.c @@ -0,0 +1,75 @@ +/** + @file list.c + @brief ENet linked list functions +*/ +#define ENET_BUILDING_LIB 1 +#include "enet/enet.h" + +/** + @defgroup list ENet linked list utility functions + @ingroup private + @{ +*/ +void +enet_list_clear (ENetList * list) +{ + list -> sentinel.next = & list -> sentinel; + list -> sentinel.previous = & list -> sentinel; +} + +ENetListIterator +enet_list_insert (ENetListIterator position, void * data) +{ + ENetListIterator result = (ENetListIterator) data; + + result -> previous = position -> previous; + result -> next = position; + + result -> previous -> next = result; + position -> previous = result; + + return result; +} + +void * +enet_list_remove (ENetListIterator position) +{ + position -> previous -> next = position -> next; + position -> next -> previous = position -> previous; + + return position; +} + +ENetListIterator +enet_list_move (ENetListIterator position, void * dataFirst, void * dataLast) +{ + ENetListIterator first = (ENetListIterator) dataFirst, + last = (ENetListIterator) dataLast; + + first -> previous -> next = last -> next; + last -> next -> previous = first -> previous; + + first -> previous = position -> previous; + last -> next = position; + + first -> previous -> next = first; + position -> previous = last; + + return first; +} + +size_t +enet_list_size (ENetList * list) +{ + size_t size = 0; + ENetListIterator position; + + for (position = enet_list_begin (list); + position != enet_list_end (list); + position = enet_list_next (position)) + ++ size; + + return size; +} + +/** @} */ diff --git a/pkg/enet/enet/missing b/pkg/enet/enet/missing new file mode 100755 index 000000000..1fe1611f1 --- /dev/null +++ b/pkg/enet/enet/missing @@ -0,0 +1,215 @@ +#! /bin/sh +# Common wrapper for a few potentially missing GNU programs. + +scriptversion=2018-03-07.03; # UTC + +# Copyright (C) 1996-2021 Free Software Foundation, Inc. +# Originally written by Fran,cois Pinard , 1996. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. 
+ +if test $# -eq 0; then + echo 1>&2 "Try '$0 --help' for more information" + exit 1 +fi + +case $1 in + + --is-lightweight) + # Used by our autoconf macros to check whether the available missing + # script is modern enough. + exit 0 + ;; + + --run) + # Back-compat with the calling convention used by older automake. + shift + ;; + + -h|--h|--he|--hel|--help) + echo "\ +$0 [OPTION]... PROGRAM [ARGUMENT]... + +Run 'PROGRAM [ARGUMENT]...', returning a proper advice when this fails due +to PROGRAM being missing or too old. + +Options: + -h, --help display this help and exit + -v, --version output version information and exit + +Supported PROGRAM values: + aclocal autoconf autoheader autom4te automake makeinfo + bison yacc flex lex help2man + +Version suffixes to PROGRAM as well as the prefixes 'gnu-', 'gnu', and +'g' are ignored when checking the name. + +Send bug reports to ." + exit $? + ;; + + -v|--v|--ve|--ver|--vers|--versi|--versio|--version) + echo "missing $scriptversion (GNU Automake)" + exit $? + ;; + + -*) + echo 1>&2 "$0: unknown '$1' option" + echo 1>&2 "Try '$0 --help' for more information" + exit 1 + ;; + +esac + +# Run the given program, remember its exit status. +"$@"; st=$? + +# If it succeeded, we are done. +test $st -eq 0 && exit 0 + +# Also exit now if we it failed (or wasn't found), and '--version' was +# passed; such an option is passed most likely to detect whether the +# program is present and works. +case $2 in --version|--help) exit $st;; esac + +# Exit code 63 means version mismatch. This often happens when the user +# tries to use an ancient version of a tool on a file that requires a +# minimum version. +if test $st -eq 63; then + msg="probably too old" +elif test $st -eq 127; then + # Program was missing. + msg="missing on your system" +else + # Program was found and executed, but failed. Give up. + exit $st +fi + +perl_URL=https://www.perl.org/ +flex_URL=https://github.com/westes/flex +gnu_software_URL=https://www.gnu.org/software + +program_details () +{ + case $1 in + aclocal|automake) + echo "The '$1' program is part of the GNU Automake package:" + echo "<$gnu_software_URL/automake>" + echo "It also requires GNU Autoconf, GNU m4 and Perl in order to run:" + echo "<$gnu_software_URL/autoconf>" + echo "<$gnu_software_URL/m4/>" + echo "<$perl_URL>" + ;; + autoconf|autom4te|autoheader) + echo "The '$1' program is part of the GNU Autoconf package:" + echo "<$gnu_software_URL/autoconf/>" + echo "It also requires GNU m4 and Perl in order to run:" + echo "<$gnu_software_URL/m4/>" + echo "<$perl_URL>" + ;; + esac +} + +give_advice () +{ + # Normalize program name to check for. + normalized_program=`echo "$1" | sed ' + s/^gnu-//; t + s/^gnu//; t + s/^g//; t'` + + printf '%s\n' "'$1' is $msg." + + configure_deps="'configure.ac' or m4 files included by 'configure.ac'" + case $normalized_program in + autoconf*) + echo "You should only need it if you modified 'configure.ac'," + echo "or m4 files included by it." + program_details 'autoconf' + ;; + autoheader*) + echo "You should only need it if you modified 'acconfig.h' or" + echo "$configure_deps." + program_details 'autoheader' + ;; + automake*) + echo "You should only need it if you modified 'Makefile.am' or" + echo "$configure_deps." + program_details 'automake' + ;; + aclocal*) + echo "You should only need it if you modified 'acinclude.m4' or" + echo "$configure_deps." 
+ program_details 'aclocal' + ;; + autom4te*) + echo "You might have modified some maintainer files that require" + echo "the 'autom4te' program to be rebuilt." + program_details 'autom4te' + ;; + bison*|yacc*) + echo "You should only need it if you modified a '.y' file." + echo "You may want to install the GNU Bison package:" + echo "<$gnu_software_URL/bison/>" + ;; + lex*|flex*) + echo "You should only need it if you modified a '.l' file." + echo "You may want to install the Fast Lexical Analyzer package:" + echo "<$flex_URL>" + ;; + help2man*) + echo "You should only need it if you modified a dependency" \ + "of a man page." + echo "You may want to install the GNU Help2man package:" + echo "<$gnu_software_URL/help2man/>" + ;; + makeinfo*) + echo "You should only need it if you modified a '.texi' file, or" + echo "any other file indirectly affecting the aspect of the manual." + echo "You might want to install the Texinfo package:" + echo "<$gnu_software_URL/texinfo/>" + echo "The spurious makeinfo call might also be the consequence of" + echo "using a buggy 'make' (AIX, DU, IRIX), in which case you might" + echo "want to install GNU make:" + echo "<$gnu_software_URL/make/>" + ;; + *) + echo "You might have modified some files without having the proper" + echo "tools for further handling them. Check the 'README' file, it" + echo "often tells you about the needed prerequisites for installing" + echo "this package. You may also peek at any GNU archive site, in" + echo "case some other package contains this missing '$1' program." + ;; + esac +} + +give_advice "$1" | sed -e '1s/^/WARNING: /' \ + -e '2,$s/^/ /' >&2 + +# Propagate the correct exit status (expected to be 127 for a program +# not found, 63 for a program that failed due to version mismatch). +exit $st + +# Local variables: +# eval: (add-hook 'before-save-hook 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC0" +# time-stamp-end: "; # UTC" +# End: diff --git a/pkg/enet/enet/packet.c b/pkg/enet/enet/packet.c new file mode 100644 index 000000000..832cff96b --- /dev/null +++ b/pkg/enet/enet/packet.c @@ -0,0 +1,163 @@ +/** + @file packet.c + @brief ENet packet management functions +*/ +#include +#define ENET_BUILDING_LIB 1 +#include "enet/enet.h" + +/** @defgroup Packet ENet packet functions + @{ +*/ + +/** Creates a packet that may be sent to a peer. + @param data initial contents of the packet's data; the packet's data will remain uninitialized if data is NULL. + @param dataLength size of the data allocated for this packet + @param flags flags for this packet as described for the ENetPacket structure. + @returns the packet on success, NULL on failure +*/ +ENetPacket * +enet_packet_create (const void * data, size_t dataLength, enet_uint32 flags) +{ + ENetPacket * packet = (ENetPacket *) enet_malloc (sizeof (ENetPacket)); + if (packet == NULL) + return NULL; + + if (flags & ENET_PACKET_FLAG_NO_ALLOCATE) + packet -> data = (enet_uint8 *) data; + else + if (dataLength <= 0) + packet -> data = NULL; + else + { + packet -> data = (enet_uint8 *) enet_malloc (dataLength); + if (packet -> data == NULL) + { + enet_free (packet); + return NULL; + } + + if (data != NULL) + memcpy (packet -> data, data, dataLength); + } + + packet -> referenceCount = 0; + packet -> flags = flags; + packet -> dataLength = dataLength; + packet -> freeCallback = NULL; + packet -> userData = NULL; + + return packet; +} + +/** Destroys the packet and deallocates its data. 
+ @param packet packet to be destroyed +*/ +void +enet_packet_destroy (ENetPacket * packet) +{ + if (packet == NULL) + return; + + if (packet -> freeCallback != NULL) + (* packet -> freeCallback) (packet); + if (! (packet -> flags & ENET_PACKET_FLAG_NO_ALLOCATE) && + packet -> data != NULL) + enet_free (packet -> data); + enet_free (packet); +} + +/** Attempts to resize the data in the packet to length specified in the + dataLength parameter + @param packet packet to resize + @param dataLength new size for the packet data + @returns 0 on success, < 0 on failure +*/ +int +enet_packet_resize (ENetPacket * packet, size_t dataLength) +{ + enet_uint8 * newData; + + if (dataLength <= packet -> dataLength || (packet -> flags & ENET_PACKET_FLAG_NO_ALLOCATE)) + { + packet -> dataLength = dataLength; + + return 0; + } + + newData = (enet_uint8 *) enet_malloc (dataLength); + if (newData == NULL) + return -1; + + if (packet -> data != NULL) + { + if (packet -> dataLength > 0) + memcpy (newData, packet -> data, packet -> dataLength); + + enet_free (packet -> data); + } + + packet -> data = newData; + packet -> dataLength = dataLength; + + return 0; +} + +static const enet_uint32 crcTable [256] = +{ + 0, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, + 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, + 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, + 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, + 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, + 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, + 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, + 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, + 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, + 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, + 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, + 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, + 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, + 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, + 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, + 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, + 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, + 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, + 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, + 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, + 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, + 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, + 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, + 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, + 
0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x5005713, + 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0xBDBDF21, + 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, + 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, + 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, + 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, + 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, + 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D +}; + +enet_uint32 +enet_crc32 (const ENetBuffer * buffers, size_t bufferCount) +{ + enet_uint32 crc = 0xFFFFFFFF; + + while (bufferCount -- > 0) + { + const enet_uint8 * data = (const enet_uint8 *) buffers -> data, + * dataEnd = & data [buffers -> dataLength]; + + while (data < dataEnd) + { + crc = (crc >> 8) ^ crcTable [(crc & 0xFF) ^ *data++]; + } + + ++ buffers; + } + + return ENET_HOST_TO_NET_32 (~ crc); +} + +/** @} */ diff --git a/pkg/enet/enet/peer.c b/pkg/enet/enet/peer.c new file mode 100644 index 000000000..df8f40f30 --- /dev/null +++ b/pkg/enet/enet/peer.c @@ -0,0 +1,1030 @@ +/** + @file peer.c + @brief ENet peer management functions +*/ +#include +#define ENET_BUILDING_LIB 1 +#include "enet/utility.h" +#include "enet/enet.h" + +/** @defgroup peer ENet peer functions + @{ +*/ + +/** Configures throttle parameter for a peer. + + Unreliable packets are dropped by ENet in response to the varying conditions + of the Internet connection to the peer. The throttle represents a probability + that an unreliable packet should not be dropped and thus sent by ENet to the peer. + The lowest mean round trip time from the sending of a reliable packet to the + receipt of its acknowledgement is measured over an amount of time specified by + the interval parameter in milliseconds. If a measured round trip time happens to + be significantly less than the mean round trip time measured over the interval, + then the throttle probability is increased to allow more traffic by an amount + specified in the acceleration parameter, which is a ratio to the ENET_PEER_PACKET_THROTTLE_SCALE + constant. If a measured round trip time happens to be significantly greater than + the mean round trip time measured over the interval, then the throttle probability + is decreased to limit traffic by an amount specified in the deceleration parameter, which + is a ratio to the ENET_PEER_PACKET_THROTTLE_SCALE constant. When the throttle has + a value of ENET_PEER_PACKET_THROTTLE_SCALE, no unreliable packets are dropped by + ENet, and so 100% of all unreliable packets will be sent. When the throttle has a + value of 0, all unreliable packets are dropped by ENet, and so 0% of all unreliable + packets will be sent. Intermediate values for the throttle represent intermediate + probabilities between 0% and 100% of unreliable packets being sent. The bandwidth + limits of the local and foreign hosts are taken into account to determine a + sensible limit for the throttle probability above which it should not raise even in + the best of conditions. + + @param peer peer to configure + @param interval interval, in milliseconds, over which to measure lowest mean RTT; the default value is ENET_PEER_PACKET_THROTTLE_INTERVAL. 
+ @param acceleration rate at which to increase the throttle probability as mean RTT declines + @param deceleration rate at which to decrease the throttle probability as mean RTT increases +*/ +void +enet_peer_throttle_configure (ENetPeer * peer, enet_uint32 interval, enet_uint32 acceleration, enet_uint32 deceleration) +{ + ENetProtocol command; + + peer -> packetThrottleInterval = interval; + peer -> packetThrottleAcceleration = acceleration; + peer -> packetThrottleDeceleration = deceleration; + + command.header.command = ENET_PROTOCOL_COMMAND_THROTTLE_CONFIGURE | ENET_PROTOCOL_COMMAND_FLAG_ACKNOWLEDGE; + command.header.channelID = 0xFF; + + command.throttleConfigure.packetThrottleInterval = ENET_HOST_TO_NET_32 (interval); + command.throttleConfigure.packetThrottleAcceleration = ENET_HOST_TO_NET_32 (acceleration); + command.throttleConfigure.packetThrottleDeceleration = ENET_HOST_TO_NET_32 (deceleration); + + enet_peer_queue_outgoing_command (peer, & command, NULL, 0, 0); +} + +int +enet_peer_throttle (ENetPeer * peer, enet_uint32 rtt) +{ + if (peer -> lastRoundTripTime <= peer -> lastRoundTripTimeVariance) + { + peer -> packetThrottle = peer -> packetThrottleLimit; + } + else + if (rtt <= peer -> lastRoundTripTime) + { + peer -> packetThrottle += peer -> packetThrottleAcceleration; + + if (peer -> packetThrottle > peer -> packetThrottleLimit) + peer -> packetThrottle = peer -> packetThrottleLimit; + + return 1; + } + else + if (rtt > peer -> lastRoundTripTime + 2 * peer -> lastRoundTripTimeVariance) + { + if (peer -> packetThrottle > peer -> packetThrottleDeceleration) + peer -> packetThrottle -= peer -> packetThrottleDeceleration; + else + peer -> packetThrottle = 0; + + return -1; + } + + return 0; +} + +/** Queues a packet to be sent. + + On success, ENet will assume ownership of the packet, and so enet_packet_destroy + should not be called on it thereafter. On failure, the caller still must destroy + the packet on its own as ENet has not queued the packet. The caller can also + check the packet's referenceCount field after sending to check if ENet queued + the packet and thus incremented the referenceCount. 
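+
+  An illustrative usage sketch (not part of the original documentation; it assumes
+  a connected peer and a data buffer of dataLength bytes):
+
+    ENetPacket * packet = enet_packet_create (data, dataLength, ENET_PACKET_FLAG_RELIABLE);
+    if (packet == NULL || enet_peer_send (peer, 0, packet) < 0)
+      enet_packet_destroy (packet);
+
+  enet_packet_destroy is safe to call with NULL and is only invoked here when ENet
+  has not taken ownership of the packet.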
+ + @param peer destination for the packet + @param channelID channel on which to send + @param packet packet to send + @retval 0 on success + @retval < 0 on failure +*/ +int +enet_peer_send (ENetPeer * peer, enet_uint8 channelID, ENetPacket * packet) +{ + ENetChannel * channel; + ENetProtocol command; + size_t fragmentLength; + + if (peer -> state != ENET_PEER_STATE_CONNECTED || + channelID >= peer -> channelCount || + packet -> dataLength > peer -> host -> maximumPacketSize) + return -1; + + channel = & peer -> channels [channelID]; + fragmentLength = peer -> mtu - sizeof (ENetProtocolHeader) - sizeof (ENetProtocolSendFragment); + if (peer -> host -> checksum != NULL) + fragmentLength -= sizeof(enet_uint32); + + if (packet -> dataLength > fragmentLength) + { + enet_uint32 fragmentCount = (packet -> dataLength + fragmentLength - 1) / fragmentLength, + fragmentNumber, + fragmentOffset; + enet_uint8 commandNumber; + enet_uint16 startSequenceNumber; + ENetList fragments; + ENetOutgoingCommand * fragment; + + if (fragmentCount > ENET_PROTOCOL_MAXIMUM_FRAGMENT_COUNT) + return -1; + + if ((packet -> flags & (ENET_PACKET_FLAG_RELIABLE | ENET_PACKET_FLAG_UNRELIABLE_FRAGMENT)) == ENET_PACKET_FLAG_UNRELIABLE_FRAGMENT && + channel -> outgoingUnreliableSequenceNumber < 0xFFFF) + { + commandNumber = ENET_PROTOCOL_COMMAND_SEND_UNRELIABLE_FRAGMENT; + startSequenceNumber = ENET_HOST_TO_NET_16 (channel -> outgoingUnreliableSequenceNumber + 1); + } + else + { + commandNumber = ENET_PROTOCOL_COMMAND_SEND_FRAGMENT | ENET_PROTOCOL_COMMAND_FLAG_ACKNOWLEDGE; + startSequenceNumber = ENET_HOST_TO_NET_16 (channel -> outgoingReliableSequenceNumber + 1); + } + + enet_list_clear (& fragments); + + for (fragmentNumber = 0, + fragmentOffset = 0; + fragmentOffset < packet -> dataLength; + ++ fragmentNumber, + fragmentOffset += fragmentLength) + { + if (packet -> dataLength - fragmentOffset < fragmentLength) + fragmentLength = packet -> dataLength - fragmentOffset; + + fragment = (ENetOutgoingCommand *) enet_malloc (sizeof (ENetOutgoingCommand)); + if (fragment == NULL) + { + while (! enet_list_empty (& fragments)) + { + fragment = (ENetOutgoingCommand *) enet_list_remove (enet_list_begin (& fragments)); + + enet_free (fragment); + } + + return -1; + } + + fragment -> fragmentOffset = fragmentOffset; + fragment -> fragmentLength = fragmentLength; + fragment -> packet = packet; + fragment -> command.header.command = commandNumber; + fragment -> command.header.channelID = channelID; + fragment -> command.sendFragment.startSequenceNumber = startSequenceNumber; + fragment -> command.sendFragment.dataLength = ENET_HOST_TO_NET_16 (fragmentLength); + fragment -> command.sendFragment.fragmentCount = ENET_HOST_TO_NET_32 (fragmentCount); + fragment -> command.sendFragment.fragmentNumber = ENET_HOST_TO_NET_32 (fragmentNumber); + fragment -> command.sendFragment.totalLength = ENET_HOST_TO_NET_32 (packet -> dataLength); + fragment -> command.sendFragment.fragmentOffset = ENET_NET_TO_HOST_32 (fragmentOffset); + + enet_list_insert (enet_list_end (& fragments), fragment); + } + + packet -> referenceCount += fragmentNumber; + + while (! 
enet_list_empty (& fragments)) + { + fragment = (ENetOutgoingCommand *) enet_list_remove (enet_list_begin (& fragments)); + + enet_peer_setup_outgoing_command (peer, fragment); + } + + return 0; + } + + command.header.channelID = channelID; + + if ((packet -> flags & (ENET_PACKET_FLAG_RELIABLE | ENET_PACKET_FLAG_UNSEQUENCED)) == ENET_PACKET_FLAG_UNSEQUENCED) + { + command.header.command = ENET_PROTOCOL_COMMAND_SEND_UNSEQUENCED | ENET_PROTOCOL_COMMAND_FLAG_UNSEQUENCED; + command.sendUnsequenced.dataLength = ENET_HOST_TO_NET_16 (packet -> dataLength); + } + else + if (packet -> flags & ENET_PACKET_FLAG_RELIABLE || channel -> outgoingUnreliableSequenceNumber >= 0xFFFF) + { + command.header.command = ENET_PROTOCOL_COMMAND_SEND_RELIABLE | ENET_PROTOCOL_COMMAND_FLAG_ACKNOWLEDGE; + command.sendReliable.dataLength = ENET_HOST_TO_NET_16 (packet -> dataLength); + } + else + { + command.header.command = ENET_PROTOCOL_COMMAND_SEND_UNRELIABLE; + command.sendUnreliable.dataLength = ENET_HOST_TO_NET_16 (packet -> dataLength); + } + + if (enet_peer_queue_outgoing_command (peer, & command, packet, 0, packet -> dataLength) == NULL) + return -1; + + return 0; +} + +/** Attempts to dequeue any incoming queued packet. + @param peer peer to dequeue packets from + @param channelID holds the channel ID of the channel the packet was received on success + @returns a pointer to the packet, or NULL if there are no available incoming queued packets +*/ +ENetPacket * +enet_peer_receive (ENetPeer * peer, enet_uint8 * channelID) +{ + ENetIncomingCommand * incomingCommand; + ENetPacket * packet; + + if (enet_list_empty (& peer -> dispatchedCommands)) + return NULL; + + incomingCommand = (ENetIncomingCommand *) enet_list_remove (enet_list_begin (& peer -> dispatchedCommands)); + + if (channelID != NULL) + * channelID = incomingCommand -> command.header.channelID; + + packet = incomingCommand -> packet; + + -- packet -> referenceCount; + + if (incomingCommand -> fragments != NULL) + enet_free (incomingCommand -> fragments); + + enet_free (incomingCommand); + + peer -> totalWaitingData -= ENET_MIN (peer -> totalWaitingData, packet -> dataLength); + + return packet; +} + +static void +enet_peer_reset_outgoing_commands (ENetPeer * peer, ENetList * queue) +{ + ENetOutgoingCommand * outgoingCommand; + + while (! 
enet_list_empty (queue)) + { + outgoingCommand = (ENetOutgoingCommand *) enet_list_remove (enet_list_begin (queue)); + + if (outgoingCommand -> packet != NULL) + { + -- outgoingCommand -> packet -> referenceCount; + + if (outgoingCommand -> packet -> referenceCount == 0) + enet_packet_destroy (outgoingCommand -> packet); + } + + enet_free (outgoingCommand); + } +} + +static void +enet_peer_remove_incoming_commands (ENetPeer * peer, ENetList * queue, ENetListIterator startCommand, ENetListIterator endCommand, ENetIncomingCommand * excludeCommand) +{ + ENetListIterator currentCommand; + + for (currentCommand = startCommand; currentCommand != endCommand; ) + { + ENetIncomingCommand * incomingCommand = (ENetIncomingCommand *) currentCommand; + + currentCommand = enet_list_next (currentCommand); + + if (incomingCommand == excludeCommand) + continue; + + enet_list_remove (& incomingCommand -> incomingCommandList); + + if (incomingCommand -> packet != NULL) + { + -- incomingCommand -> packet -> referenceCount; + + peer -> totalWaitingData -= ENET_MIN (peer -> totalWaitingData, incomingCommand -> packet -> dataLength); + + if (incomingCommand -> packet -> referenceCount == 0) + enet_packet_destroy (incomingCommand -> packet); + } + + if (incomingCommand -> fragments != NULL) + enet_free (incomingCommand -> fragments); + + enet_free (incomingCommand); + } +} + +static void +enet_peer_reset_incoming_commands (ENetPeer * peer, ENetList * queue) +{ + enet_peer_remove_incoming_commands(peer, queue, enet_list_begin (queue), enet_list_end (queue), NULL); +} + +void +enet_peer_reset_queues (ENetPeer * peer) +{ + ENetChannel * channel; + + if (peer -> flags & ENET_PEER_FLAG_NEEDS_DISPATCH) + { + enet_list_remove (& peer -> dispatchList); + + peer -> flags &= ~ ENET_PEER_FLAG_NEEDS_DISPATCH; + } + + while (! enet_list_empty (& peer -> acknowledgements)) + enet_free (enet_list_remove (enet_list_begin (& peer -> acknowledgements))); + + enet_peer_reset_outgoing_commands (peer, & peer -> sentReliableCommands); + enet_peer_reset_outgoing_commands (peer, & peer -> outgoingCommands); + enet_peer_reset_outgoing_commands (peer, & peer -> outgoingSendReliableCommands); + enet_peer_reset_incoming_commands (peer, & peer -> dispatchedCommands); + + if (peer -> channels != NULL && peer -> channelCount > 0) + { + for (channel = peer -> channels; + channel < & peer -> channels [peer -> channelCount]; + ++ channel) + { + enet_peer_reset_incoming_commands (peer, & channel -> incomingReliableCommands); + enet_peer_reset_incoming_commands (peer, & channel -> incomingUnreliableCommands); + } + + enet_free (peer -> channels); + } + + peer -> channels = NULL; + peer -> channelCount = 0; +} + +void +enet_peer_on_connect (ENetPeer * peer) +{ + if (peer -> state != ENET_PEER_STATE_CONNECTED && peer -> state != ENET_PEER_STATE_DISCONNECT_LATER) + { + if (peer -> incomingBandwidth != 0) + ++ peer -> host -> bandwidthLimitedPeers; + + ++ peer -> host -> connectedPeers; + } +} + +void +enet_peer_on_disconnect (ENetPeer * peer) +{ + if (peer -> state == ENET_PEER_STATE_CONNECTED || peer -> state == ENET_PEER_STATE_DISCONNECT_LATER) + { + if (peer -> incomingBandwidth != 0) + -- peer -> host -> bandwidthLimitedPeers; + + -- peer -> host -> connectedPeers; + } +} + +/** Forcefully disconnects a peer. + @param peer peer to forcefully disconnect + @remarks The foreign host represented by the peer is not notified of the disconnection and will timeout + on its connection to the local host. 
+*/ +void +enet_peer_reset (ENetPeer * peer) +{ + enet_peer_on_disconnect (peer); + + peer -> outgoingPeerID = ENET_PROTOCOL_MAXIMUM_PEER_ID; + peer -> connectID = 0; + + peer -> state = ENET_PEER_STATE_DISCONNECTED; + + peer -> incomingBandwidth = 0; + peer -> outgoingBandwidth = 0; + peer -> incomingBandwidthThrottleEpoch = 0; + peer -> outgoingBandwidthThrottleEpoch = 0; + peer -> incomingDataTotal = 0; + peer -> outgoingDataTotal = 0; + peer -> lastSendTime = 0; + peer -> lastReceiveTime = 0; + peer -> nextTimeout = 0; + peer -> earliestTimeout = 0; + peer -> packetLossEpoch = 0; + peer -> packetsSent = 0; + peer -> packetsLost = 0; + peer -> packetLoss = 0; + peer -> packetLossVariance = 0; + peer -> packetThrottle = ENET_PEER_DEFAULT_PACKET_THROTTLE; + peer -> packetThrottleLimit = ENET_PEER_PACKET_THROTTLE_SCALE; + peer -> packetThrottleCounter = 0; + peer -> packetThrottleEpoch = 0; + peer -> packetThrottleAcceleration = ENET_PEER_PACKET_THROTTLE_ACCELERATION; + peer -> packetThrottleDeceleration = ENET_PEER_PACKET_THROTTLE_DECELERATION; + peer -> packetThrottleInterval = ENET_PEER_PACKET_THROTTLE_INTERVAL; + peer -> pingInterval = ENET_PEER_PING_INTERVAL; + peer -> timeoutLimit = ENET_PEER_TIMEOUT_LIMIT; + peer -> timeoutMinimum = ENET_PEER_TIMEOUT_MINIMUM; + peer -> timeoutMaximum = ENET_PEER_TIMEOUT_MAXIMUM; + peer -> lastRoundTripTime = ENET_PEER_DEFAULT_ROUND_TRIP_TIME; + peer -> lowestRoundTripTime = ENET_PEER_DEFAULT_ROUND_TRIP_TIME; + peer -> lastRoundTripTimeVariance = 0; + peer -> highestRoundTripTimeVariance = 0; + peer -> roundTripTime = ENET_PEER_DEFAULT_ROUND_TRIP_TIME; + peer -> roundTripTimeVariance = 0; + peer -> mtu = peer -> host -> mtu; + peer -> reliableDataInTransit = 0; + peer -> outgoingReliableSequenceNumber = 0; + peer -> windowSize = ENET_PROTOCOL_MAXIMUM_WINDOW_SIZE; + peer -> incomingUnsequencedGroup = 0; + peer -> outgoingUnsequencedGroup = 0; + peer -> eventData = 0; + peer -> totalWaitingData = 0; + peer -> flags = 0; + + memset (peer -> unsequencedWindow, 0, sizeof (peer -> unsequencedWindow)); + + enet_peer_reset_queues (peer); +} + +/** Sends a ping request to a peer. + @param peer destination for the ping request + @remarks ping requests factor into the mean round trip time as designated by the + roundTripTime field in the ENetPeer structure. ENet automatically pings all connected + peers at regular intervals, however, this function may be called to ensure more + frequent ping requests. +*/ +void +enet_peer_ping (ENetPeer * peer) +{ + ENetProtocol command; + + if (peer -> state != ENET_PEER_STATE_CONNECTED) + return; + + command.header.command = ENET_PROTOCOL_COMMAND_PING | ENET_PROTOCOL_COMMAND_FLAG_ACKNOWLEDGE; + command.header.channelID = 0xFF; + + enet_peer_queue_outgoing_command (peer, & command, NULL, 0, 0); +} + +/** Sets the interval at which pings will be sent to a peer. + + Pings are used both to monitor the liveness of the connection and also to dynamically + adjust the throttle during periods of low traffic so that the throttle has reasonable + responsiveness during traffic spikes. + + @param peer the peer to adjust + @param pingInterval the interval at which to send pings; defaults to ENET_PEER_PING_INTERVAL if 0 +*/ +void +enet_peer_ping_interval (ENetPeer * peer, enet_uint32 pingInterval) +{ + peer -> pingInterval = pingInterval ? pingInterval : ENET_PEER_PING_INTERVAL; +} + +/** Sets the timeout parameters for a peer. 
+ + The timeout parameter control how and when a peer will timeout from a failure to acknowledge + reliable traffic. Timeout values use an exponential backoff mechanism, where if a reliable + packet is not acknowledge within some multiple of the average RTT plus a variance tolerance, + the timeout will be doubled until it reaches a set limit. If the timeout is thus at this + limit and reliable packets have been sent but not acknowledged within a certain minimum time + period, the peer will be disconnected. Alternatively, if reliable packets have been sent + but not acknowledged for a certain maximum time period, the peer will be disconnected regardless + of the current timeout limit value. + + @param peer the peer to adjust + @param timeoutLimit the timeout limit; defaults to ENET_PEER_TIMEOUT_LIMIT if 0 + @param timeoutMinimum the timeout minimum; defaults to ENET_PEER_TIMEOUT_MINIMUM if 0 + @param timeoutMaximum the timeout maximum; defaults to ENET_PEER_TIMEOUT_MAXIMUM if 0 +*/ + +void +enet_peer_timeout (ENetPeer * peer, enet_uint32 timeoutLimit, enet_uint32 timeoutMinimum, enet_uint32 timeoutMaximum) +{ + peer -> timeoutLimit = timeoutLimit ? timeoutLimit : ENET_PEER_TIMEOUT_LIMIT; + peer -> timeoutMinimum = timeoutMinimum ? timeoutMinimum : ENET_PEER_TIMEOUT_MINIMUM; + peer -> timeoutMaximum = timeoutMaximum ? timeoutMaximum : ENET_PEER_TIMEOUT_MAXIMUM; +} + +/** Force an immediate disconnection from a peer. + @param peer peer to disconnect + @param data data describing the disconnection + @remarks No ENET_EVENT_DISCONNECT event will be generated. The foreign peer is not + guaranteed to receive the disconnect notification, and is reset immediately upon + return from this function. +*/ +void +enet_peer_disconnect_now (ENetPeer * peer, enet_uint32 data) +{ + ENetProtocol command; + + if (peer -> state == ENET_PEER_STATE_DISCONNECTED) + return; + + if (peer -> state != ENET_PEER_STATE_ZOMBIE && + peer -> state != ENET_PEER_STATE_DISCONNECTING) + { + enet_peer_reset_queues (peer); + + command.header.command = ENET_PROTOCOL_COMMAND_DISCONNECT | ENET_PROTOCOL_COMMAND_FLAG_UNSEQUENCED; + command.header.channelID = 0xFF; + command.disconnect.data = ENET_HOST_TO_NET_32 (data); + + enet_peer_queue_outgoing_command (peer, & command, NULL, 0, 0); + + enet_host_flush (peer -> host); + } + + enet_peer_reset (peer); +} + +/** Request a disconnection from a peer. + @param peer peer to request a disconnection + @param data data describing the disconnection + @remarks An ENET_EVENT_DISCONNECT event will be generated by enet_host_service() + once the disconnection is complete. 
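+
+  An illustrative shutdown sketch (not part of the original documentation; it
+  assumes the ENetHost * host that owns this peer and a local ENetEvent event):
+
+    enet_peer_disconnect (peer, 0);
+    while (enet_host_service (host, & event, 3000) > 0)
+      if (event.type == ENET_EVENT_TYPE_DISCONNECT)
+        break;
+
+  If no disconnect event arrives within the timeout, the connection can still be
+  torn down locally with enet_peer_reset ().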
+*/ +void +enet_peer_disconnect (ENetPeer * peer, enet_uint32 data) +{ + ENetProtocol command; + + if (peer -> state == ENET_PEER_STATE_DISCONNECTING || + peer -> state == ENET_PEER_STATE_DISCONNECTED || + peer -> state == ENET_PEER_STATE_ACKNOWLEDGING_DISCONNECT || + peer -> state == ENET_PEER_STATE_ZOMBIE) + return; + + enet_peer_reset_queues (peer); + + command.header.command = ENET_PROTOCOL_COMMAND_DISCONNECT; + command.header.channelID = 0xFF; + command.disconnect.data = ENET_HOST_TO_NET_32 (data); + + if (peer -> state == ENET_PEER_STATE_CONNECTED || peer -> state == ENET_PEER_STATE_DISCONNECT_LATER) + command.header.command |= ENET_PROTOCOL_COMMAND_FLAG_ACKNOWLEDGE; + else + command.header.command |= ENET_PROTOCOL_COMMAND_FLAG_UNSEQUENCED; + + enet_peer_queue_outgoing_command (peer, & command, NULL, 0, 0); + + if (peer -> state == ENET_PEER_STATE_CONNECTED || peer -> state == ENET_PEER_STATE_DISCONNECT_LATER) + { + enet_peer_on_disconnect (peer); + + peer -> state = ENET_PEER_STATE_DISCONNECTING; + } + else + { + enet_host_flush (peer -> host); + enet_peer_reset (peer); + } +} + +int +enet_peer_has_outgoing_commands (ENetPeer * peer) +{ + if (enet_list_empty (& peer -> outgoingCommands) && + enet_list_empty (& peer -> outgoingSendReliableCommands) && + enet_list_empty (& peer -> sentReliableCommands)) + return 0; + + return 1; +} + +/** Request a disconnection from a peer, but only after all queued outgoing packets are sent. + @param peer peer to request a disconnection + @param data data describing the disconnection + @remarks An ENET_EVENT_DISCONNECT event will be generated by enet_host_service() + once the disconnection is complete. +*/ +void +enet_peer_disconnect_later (ENetPeer * peer, enet_uint32 data) +{ + if ((peer -> state == ENET_PEER_STATE_CONNECTED || peer -> state == ENET_PEER_STATE_DISCONNECT_LATER) && + enet_peer_has_outgoing_commands (peer)) + { + peer -> state = ENET_PEER_STATE_DISCONNECT_LATER; + peer -> eventData = data; + } + else + enet_peer_disconnect (peer, data); +} + +ENetAcknowledgement * +enet_peer_queue_acknowledgement (ENetPeer * peer, const ENetProtocol * command, enet_uint16 sentTime) +{ + ENetAcknowledgement * acknowledgement; + + if (command -> header.channelID < peer -> channelCount) + { + ENetChannel * channel = & peer -> channels [command -> header.channelID]; + enet_uint16 reliableWindow = command -> header.reliableSequenceNumber / ENET_PEER_RELIABLE_WINDOW_SIZE, + currentWindow = channel -> incomingReliableSequenceNumber / ENET_PEER_RELIABLE_WINDOW_SIZE; + + if (command -> header.reliableSequenceNumber < channel -> incomingReliableSequenceNumber) + reliableWindow += ENET_PEER_RELIABLE_WINDOWS; + + if (reliableWindow >= currentWindow + ENET_PEER_FREE_RELIABLE_WINDOWS - 1 && reliableWindow <= currentWindow + ENET_PEER_FREE_RELIABLE_WINDOWS) + return NULL; + } + + acknowledgement = (ENetAcknowledgement *) enet_malloc (sizeof (ENetAcknowledgement)); + if (acknowledgement == NULL) + return NULL; + + peer -> outgoingDataTotal += sizeof (ENetProtocolAcknowledge); + + acknowledgement -> sentTime = sentTime; + acknowledgement -> command = * command; + + enet_list_insert (enet_list_end (& peer -> acknowledgements), acknowledgement); + + return acknowledgement; +} + +void +enet_peer_setup_outgoing_command (ENetPeer * peer, ENetOutgoingCommand * outgoingCommand) +{ + peer -> outgoingDataTotal += enet_protocol_command_size (outgoingCommand -> command.header.command) + outgoingCommand -> fragmentLength; + + if (outgoingCommand -> command.header.channelID == 
0xFF) + { + ++ peer -> outgoingReliableSequenceNumber; + + outgoingCommand -> reliableSequenceNumber = peer -> outgoingReliableSequenceNumber; + outgoingCommand -> unreliableSequenceNumber = 0; + } + else + { + ENetChannel * channel = & peer -> channels [outgoingCommand -> command.header.channelID]; + + if (outgoingCommand -> command.header.command & ENET_PROTOCOL_COMMAND_FLAG_ACKNOWLEDGE) + { + ++ channel -> outgoingReliableSequenceNumber; + channel -> outgoingUnreliableSequenceNumber = 0; + + outgoingCommand -> reliableSequenceNumber = channel -> outgoingReliableSequenceNumber; + outgoingCommand -> unreliableSequenceNumber = 0; + } + else + if (outgoingCommand -> command.header.command & ENET_PROTOCOL_COMMAND_FLAG_UNSEQUENCED) + { + ++ peer -> outgoingUnsequencedGroup; + + outgoingCommand -> reliableSequenceNumber = 0; + outgoingCommand -> unreliableSequenceNumber = 0; + } + else + { + if (outgoingCommand -> fragmentOffset == 0) + ++ channel -> outgoingUnreliableSequenceNumber; + + outgoingCommand -> reliableSequenceNumber = channel -> outgoingReliableSequenceNumber; + outgoingCommand -> unreliableSequenceNumber = channel -> outgoingUnreliableSequenceNumber; + } + } + + outgoingCommand -> sendAttempts = 0; + outgoingCommand -> sentTime = 0; + outgoingCommand -> roundTripTimeout = 0; + outgoingCommand -> command.header.reliableSequenceNumber = ENET_HOST_TO_NET_16 (outgoingCommand -> reliableSequenceNumber); + outgoingCommand -> queueTime = ++ peer -> host -> totalQueued; + + switch (outgoingCommand -> command.header.command & ENET_PROTOCOL_COMMAND_MASK) + { + case ENET_PROTOCOL_COMMAND_SEND_UNRELIABLE: + outgoingCommand -> command.sendUnreliable.unreliableSequenceNumber = ENET_HOST_TO_NET_16 (outgoingCommand -> unreliableSequenceNumber); + break; + + case ENET_PROTOCOL_COMMAND_SEND_UNSEQUENCED: + outgoingCommand -> command.sendUnsequenced.unsequencedGroup = ENET_HOST_TO_NET_16 (peer -> outgoingUnsequencedGroup); + break; + + default: + break; + } + + if ((outgoingCommand -> command.header.command & ENET_PROTOCOL_COMMAND_FLAG_ACKNOWLEDGE) != 0 && + outgoingCommand -> packet != NULL) + enet_list_insert (enet_list_end (& peer -> outgoingSendReliableCommands), outgoingCommand); + else + enet_list_insert (enet_list_end (& peer -> outgoingCommands), outgoingCommand); +} + +ENetOutgoingCommand * +enet_peer_queue_outgoing_command (ENetPeer * peer, const ENetProtocol * command, ENetPacket * packet, enet_uint32 offset, enet_uint16 length) +{ + ENetOutgoingCommand * outgoingCommand = (ENetOutgoingCommand *) enet_malloc (sizeof (ENetOutgoingCommand)); + if (outgoingCommand == NULL) + return NULL; + + outgoingCommand -> command = * command; + outgoingCommand -> fragmentOffset = offset; + outgoingCommand -> fragmentLength = length; + outgoingCommand -> packet = packet; + if (packet != NULL) + ++ packet -> referenceCount; + + enet_peer_setup_outgoing_command (peer, outgoingCommand); + + return outgoingCommand; +} + +void +enet_peer_dispatch_incoming_unreliable_commands (ENetPeer * peer, ENetChannel * channel, ENetIncomingCommand * queuedCommand) +{ + ENetListIterator droppedCommand, startCommand, currentCommand; + + for (droppedCommand = startCommand = currentCommand = enet_list_begin (& channel -> incomingUnreliableCommands); + currentCommand != enet_list_end (& channel -> incomingUnreliableCommands); + currentCommand = enet_list_next (currentCommand)) + { + ENetIncomingCommand * incomingCommand = (ENetIncomingCommand *) currentCommand; + + if ((incomingCommand -> command.header.command & 
ENET_PROTOCOL_COMMAND_MASK) == ENET_PROTOCOL_COMMAND_SEND_UNSEQUENCED) + continue; + + if (incomingCommand -> reliableSequenceNumber == channel -> incomingReliableSequenceNumber) + { + if (incomingCommand -> fragmentsRemaining <= 0) + { + channel -> incomingUnreliableSequenceNumber = incomingCommand -> unreliableSequenceNumber; + continue; + } + + if (startCommand != currentCommand) + { + enet_list_move (enet_list_end (& peer -> dispatchedCommands), startCommand, enet_list_previous (currentCommand)); + + if (! (peer -> flags & ENET_PEER_FLAG_NEEDS_DISPATCH)) + { + enet_list_insert (enet_list_end (& peer -> host -> dispatchQueue), & peer -> dispatchList); + + peer -> flags |= ENET_PEER_FLAG_NEEDS_DISPATCH; + } + + droppedCommand = currentCommand; + } + else + if (droppedCommand != currentCommand) + droppedCommand = enet_list_previous (currentCommand); + } + else + { + enet_uint16 reliableWindow = incomingCommand -> reliableSequenceNumber / ENET_PEER_RELIABLE_WINDOW_SIZE, + currentWindow = channel -> incomingReliableSequenceNumber / ENET_PEER_RELIABLE_WINDOW_SIZE; + if (incomingCommand -> reliableSequenceNumber < channel -> incomingReliableSequenceNumber) + reliableWindow += ENET_PEER_RELIABLE_WINDOWS; + if (reliableWindow >= currentWindow && reliableWindow < currentWindow + ENET_PEER_FREE_RELIABLE_WINDOWS - 1) + break; + + droppedCommand = enet_list_next (currentCommand); + + if (startCommand != currentCommand) + { + enet_list_move (enet_list_end (& peer -> dispatchedCommands), startCommand, enet_list_previous (currentCommand)); + + if (! (peer -> flags & ENET_PEER_FLAG_NEEDS_DISPATCH)) + { + enet_list_insert (enet_list_end (& peer -> host -> dispatchQueue), & peer -> dispatchList); + + peer -> flags |= ENET_PEER_FLAG_NEEDS_DISPATCH; + } + } + } + + startCommand = enet_list_next (currentCommand); + } + + if (startCommand != currentCommand) + { + enet_list_move (enet_list_end (& peer -> dispatchedCommands), startCommand, enet_list_previous (currentCommand)); + + if (! 
(peer -> flags & ENET_PEER_FLAG_NEEDS_DISPATCH)) + { + enet_list_insert (enet_list_end (& peer -> host -> dispatchQueue), & peer -> dispatchList); + + peer -> flags |= ENET_PEER_FLAG_NEEDS_DISPATCH; + } + + droppedCommand = currentCommand; + } + + enet_peer_remove_incoming_commands (peer, & channel -> incomingUnreliableCommands, enet_list_begin (& channel -> incomingUnreliableCommands), droppedCommand, queuedCommand); +} + +void +enet_peer_dispatch_incoming_reliable_commands (ENetPeer * peer, ENetChannel * channel, ENetIncomingCommand * queuedCommand) +{ + ENetListIterator currentCommand; + + for (currentCommand = enet_list_begin (& channel -> incomingReliableCommands); + currentCommand != enet_list_end (& channel -> incomingReliableCommands); + currentCommand = enet_list_next (currentCommand)) + { + ENetIncomingCommand * incomingCommand = (ENetIncomingCommand *) currentCommand; + + if (incomingCommand -> fragmentsRemaining > 0 || + incomingCommand -> reliableSequenceNumber != (enet_uint16) (channel -> incomingReliableSequenceNumber + 1)) + break; + + channel -> incomingReliableSequenceNumber = incomingCommand -> reliableSequenceNumber; + + if (incomingCommand -> fragmentCount > 0) + channel -> incomingReliableSequenceNumber += incomingCommand -> fragmentCount - 1; + } + + if (currentCommand == enet_list_begin (& channel -> incomingReliableCommands)) + return; + + channel -> incomingUnreliableSequenceNumber = 0; + + enet_list_move (enet_list_end (& peer -> dispatchedCommands), enet_list_begin (& channel -> incomingReliableCommands), enet_list_previous (currentCommand)); + + if (! (peer -> flags & ENET_PEER_FLAG_NEEDS_DISPATCH)) + { + enet_list_insert (enet_list_end (& peer -> host -> dispatchQueue), & peer -> dispatchList); + + peer -> flags |= ENET_PEER_FLAG_NEEDS_DISPATCH; + } + + if (! 
enet_list_empty (& channel -> incomingUnreliableCommands)) + enet_peer_dispatch_incoming_unreliable_commands (peer, channel, queuedCommand); +} + +ENetIncomingCommand * +enet_peer_queue_incoming_command (ENetPeer * peer, const ENetProtocol * command, const void * data, size_t dataLength, enet_uint32 flags, enet_uint32 fragmentCount) +{ + static ENetIncomingCommand dummyCommand; + + ENetChannel * channel = & peer -> channels [command -> header.channelID]; + enet_uint32 unreliableSequenceNumber = 0, reliableSequenceNumber = 0; + enet_uint16 reliableWindow, currentWindow; + ENetIncomingCommand * incomingCommand; + ENetListIterator currentCommand; + ENetPacket * packet = NULL; + + if (peer -> state == ENET_PEER_STATE_DISCONNECT_LATER) + goto discardCommand; + + if ((command -> header.command & ENET_PROTOCOL_COMMAND_MASK) != ENET_PROTOCOL_COMMAND_SEND_UNSEQUENCED) + { + reliableSequenceNumber = command -> header.reliableSequenceNumber; + reliableWindow = reliableSequenceNumber / ENET_PEER_RELIABLE_WINDOW_SIZE; + currentWindow = channel -> incomingReliableSequenceNumber / ENET_PEER_RELIABLE_WINDOW_SIZE; + + if (reliableSequenceNumber < channel -> incomingReliableSequenceNumber) + reliableWindow += ENET_PEER_RELIABLE_WINDOWS; + + if (reliableWindow < currentWindow || reliableWindow >= currentWindow + ENET_PEER_FREE_RELIABLE_WINDOWS - 1) + goto discardCommand; + } + + switch (command -> header.command & ENET_PROTOCOL_COMMAND_MASK) + { + case ENET_PROTOCOL_COMMAND_SEND_FRAGMENT: + case ENET_PROTOCOL_COMMAND_SEND_RELIABLE: + if (reliableSequenceNumber == channel -> incomingReliableSequenceNumber) + goto discardCommand; + + for (currentCommand = enet_list_previous (enet_list_end (& channel -> incomingReliableCommands)); + currentCommand != enet_list_end (& channel -> incomingReliableCommands); + currentCommand = enet_list_previous (currentCommand)) + { + incomingCommand = (ENetIncomingCommand *) currentCommand; + + if (reliableSequenceNumber >= channel -> incomingReliableSequenceNumber) + { + if (incomingCommand -> reliableSequenceNumber < channel -> incomingReliableSequenceNumber) + continue; + } + else + if (incomingCommand -> reliableSequenceNumber >= channel -> incomingReliableSequenceNumber) + break; + + if (incomingCommand -> reliableSequenceNumber <= reliableSequenceNumber) + { + if (incomingCommand -> reliableSequenceNumber < reliableSequenceNumber) + break; + + goto discardCommand; + } + } + break; + + case ENET_PROTOCOL_COMMAND_SEND_UNRELIABLE: + case ENET_PROTOCOL_COMMAND_SEND_UNRELIABLE_FRAGMENT: + unreliableSequenceNumber = ENET_NET_TO_HOST_16 (command -> sendUnreliable.unreliableSequenceNumber); + + if (reliableSequenceNumber == channel -> incomingReliableSequenceNumber && + unreliableSequenceNumber <= channel -> incomingUnreliableSequenceNumber) + goto discardCommand; + + for (currentCommand = enet_list_previous (enet_list_end (& channel -> incomingUnreliableCommands)); + currentCommand != enet_list_end (& channel -> incomingUnreliableCommands); + currentCommand = enet_list_previous (currentCommand)) + { + incomingCommand = (ENetIncomingCommand *) currentCommand; + + if ((command -> header.command & ENET_PROTOCOL_COMMAND_MASK) == ENET_PROTOCOL_COMMAND_SEND_UNSEQUENCED) + continue; + + if (reliableSequenceNumber >= channel -> incomingReliableSequenceNumber) + { + if (incomingCommand -> reliableSequenceNumber < channel -> incomingReliableSequenceNumber) + continue; + } + else + if (incomingCommand -> reliableSequenceNumber >= channel -> incomingReliableSequenceNumber) + break; + + if 
(incomingCommand -> reliableSequenceNumber < reliableSequenceNumber)
+ break;
+
+ if (incomingCommand -> reliableSequenceNumber > reliableSequenceNumber)
+ continue;
+
+ if (incomingCommand -> unreliableSequenceNumber <= unreliableSequenceNumber)
+ {
+ if (incomingCommand -> unreliableSequenceNumber < unreliableSequenceNumber)
+ break;
+
+ goto discardCommand;
+ }
+ }
+ break;
+
+ case ENET_PROTOCOL_COMMAND_SEND_UNSEQUENCED:
+ currentCommand = enet_list_end (& channel -> incomingUnreliableCommands);
+ break;
+
+ default:
+ goto discardCommand;
+ }
+
+ if (peer -> totalWaitingData >= peer -> host -> maximumWaitingData)
+ goto notifyError;
+
+ packet = enet_packet_create (data, dataLength, flags);
+ if (packet == NULL)
+ goto notifyError;
+
+ incomingCommand = (ENetIncomingCommand *) enet_malloc (sizeof (ENetIncomingCommand));
+ if (incomingCommand == NULL)
+ goto notifyError;
+
+ incomingCommand -> reliableSequenceNumber = command -> header.reliableSequenceNumber;
+ incomingCommand -> unreliableSequenceNumber = unreliableSequenceNumber & 0xFFFF;
+ incomingCommand -> command = * command;
+ incomingCommand -> fragmentCount = fragmentCount;
+ incomingCommand -> fragmentsRemaining = fragmentCount;
+ incomingCommand -> packet = packet;
+ incomingCommand -> fragments = NULL;
+
+ if (fragmentCount > 0)
+ {
+ if (fragmentCount <= ENET_PROTOCOL_MAXIMUM_FRAGMENT_COUNT)
+ incomingCommand -> fragments = (enet_uint32 *) enet_malloc ((fragmentCount + 31) / 32 * sizeof (enet_uint32));
+ if (incomingCommand -> fragments == NULL)
+ {
+ enet_free (incomingCommand);
+
+ goto notifyError;
+ }
+ memset (incomingCommand -> fragments, 0, (fragmentCount + 31) / 32 * sizeof (enet_uint32));
+ }
+
+ if (packet != NULL)
+ {
+ ++ packet -> referenceCount;
+
+ peer -> totalWaitingData += packet -> dataLength;
+ }
+
+ enet_list_insert (enet_list_next (currentCommand), incomingCommand);
+
+ switch (command -> header.command & ENET_PROTOCOL_COMMAND_MASK)
+ {
+ case ENET_PROTOCOL_COMMAND_SEND_FRAGMENT:
+ case ENET_PROTOCOL_COMMAND_SEND_RELIABLE:
+ enet_peer_dispatch_incoming_reliable_commands (peer, channel, incomingCommand);
+ break;
+
+ default:
+ enet_peer_dispatch_incoming_unreliable_commands (peer, channel, incomingCommand);
+ break;
+ }
+
+ return incomingCommand;
+
+discardCommand:
+ if (fragmentCount > 0)
+ goto notifyError;
+
+ if (packet != NULL && packet -> referenceCount == 0)
+ enet_packet_destroy (packet);
+
+ return & dummyCommand;
+
+notifyError:
+ if (packet != NULL && packet -> referenceCount == 0)
+ enet_packet_destroy (packet);
+
+ return NULL;
+}
+
+/** @} */
diff --git a/pkg/enet/enet/protocol.c b/pkg/enet/enet/protocol.c
new file mode 100644
index 000000000..1206b58c9
--- /dev/null
+++ b/pkg/enet/enet/protocol.c
@@ -0,0 +1,1919 @@
+/**
+ @file protocol.c
+ @brief ENet protocol functions
+*/
+#include <stdio.h>
+#include <string.h>
+#define ENET_BUILDING_LIB 1
+#include "enet/utility.h"
+#include "enet/time.h"
+#include "enet/enet.h"
+
+static const size_t commandSizes [ENET_PROTOCOL_COMMAND_COUNT] =
+{
+ 0,
+ sizeof (ENetProtocolAcknowledge),
+ sizeof (ENetProtocolConnect),
+ sizeof (ENetProtocolVerifyConnect),
+ sizeof (ENetProtocolDisconnect),
+ sizeof (ENetProtocolPing),
+ sizeof (ENetProtocolSendReliable),
+ sizeof (ENetProtocolSendUnreliable),
+ sizeof (ENetProtocolSendFragment),
+ sizeof (ENetProtocolSendUnsequenced),
+ sizeof (ENetProtocolBandwidthLimit),
+ sizeof (ENetProtocolThrottleConfigure),
+ sizeof (ENetProtocolSendFragment)
+};
+
+size_t
+enet_protocol_command_size (enet_uint8 commandNumber)
+{
+ return
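/* The low bits selected by ENET_PROTOCOL_COMMAND_MASK identify the wire command and index
   the fixed-size table above; the ACKNOWLEDGE/UNSEQUENCED flag bits in the upper part of the
   command byte do not change the command's size. */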
commandSizes [commandNumber & ENET_PROTOCOL_COMMAND_MASK]; +} + +static void +enet_protocol_change_state (ENetHost * host, ENetPeer * peer, ENetPeerState state) +{ + if (state == ENET_PEER_STATE_CONNECTED || state == ENET_PEER_STATE_DISCONNECT_LATER) + enet_peer_on_connect (peer); + else + enet_peer_on_disconnect (peer); + + peer -> state = state; +} + +static void +enet_protocol_dispatch_state (ENetHost * host, ENetPeer * peer, ENetPeerState state) +{ + enet_protocol_change_state (host, peer, state); + + if (! (peer -> flags & ENET_PEER_FLAG_NEEDS_DISPATCH)) + { + enet_list_insert (enet_list_end (& host -> dispatchQueue), & peer -> dispatchList); + + peer -> flags |= ENET_PEER_FLAG_NEEDS_DISPATCH; + } +} + +static int +enet_protocol_dispatch_incoming_commands (ENetHost * host, ENetEvent * event) +{ + while (! enet_list_empty (& host -> dispatchQueue)) + { + ENetPeer * peer = (ENetPeer *) enet_list_remove (enet_list_begin (& host -> dispatchQueue)); + + peer -> flags &= ~ ENET_PEER_FLAG_NEEDS_DISPATCH; + + switch (peer -> state) + { + case ENET_PEER_STATE_CONNECTION_PENDING: + case ENET_PEER_STATE_CONNECTION_SUCCEEDED: + enet_protocol_change_state (host, peer, ENET_PEER_STATE_CONNECTED); + + event -> type = ENET_EVENT_TYPE_CONNECT; + event -> peer = peer; + event -> data = peer -> eventData; + + return 1; + + case ENET_PEER_STATE_ZOMBIE: + host -> recalculateBandwidthLimits = 1; + + event -> type = ENET_EVENT_TYPE_DISCONNECT; + event -> peer = peer; + event -> data = peer -> eventData; + + enet_peer_reset (peer); + + return 1; + + case ENET_PEER_STATE_CONNECTED: + if (enet_list_empty (& peer -> dispatchedCommands)) + continue; + + event -> packet = enet_peer_receive (peer, & event -> channelID); + if (event -> packet == NULL) + continue; + + event -> type = ENET_EVENT_TYPE_RECEIVE; + event -> peer = peer; + + if (! enet_list_empty (& peer -> dispatchedCommands)) + { + peer -> flags |= ENET_PEER_FLAG_NEEDS_DISPATCH; + + enet_list_insert (enet_list_end (& host -> dispatchQueue), & peer -> dispatchList); + } + + return 1; + + default: + break; + } + } + + return 0; +} + +static void +enet_protocol_notify_connect (ENetHost * host, ENetPeer * peer, ENetEvent * event) +{ + host -> recalculateBandwidthLimits = 1; + + if (event != NULL) + { + enet_protocol_change_state (host, peer, ENET_PEER_STATE_CONNECTED); + + event -> type = ENET_EVENT_TYPE_CONNECT; + event -> peer = peer; + event -> data = peer -> eventData; + } + else + enet_protocol_dispatch_state (host, peer, peer -> state == ENET_PEER_STATE_CONNECTING ? 
ENET_PEER_STATE_CONNECTION_SUCCEEDED : ENET_PEER_STATE_CONNECTION_PENDING); +} + +static void +enet_protocol_notify_disconnect (ENetHost * host, ENetPeer * peer, ENetEvent * event) +{ + if (peer -> state >= ENET_PEER_STATE_CONNECTION_PENDING) + host -> recalculateBandwidthLimits = 1; + + if (peer -> state != ENET_PEER_STATE_CONNECTING && peer -> state < ENET_PEER_STATE_CONNECTION_SUCCEEDED) + enet_peer_reset (peer); + else + if (event != NULL) + { + event -> type = ENET_EVENT_TYPE_DISCONNECT; + event -> peer = peer; + event -> data = 0; + + enet_peer_reset (peer); + } + else + { + peer -> eventData = 0; + + enet_protocol_dispatch_state (host, peer, ENET_PEER_STATE_ZOMBIE); + } +} + +static void +enet_protocol_remove_sent_unreliable_commands (ENetPeer * peer, ENetList * sentUnreliableCommands) +{ + ENetOutgoingCommand * outgoingCommand; + + if (enet_list_empty (sentUnreliableCommands)) + return; + + do + { + outgoingCommand = (ENetOutgoingCommand *) enet_list_front (sentUnreliableCommands); + + enet_list_remove (& outgoingCommand -> outgoingCommandList); + + if (outgoingCommand -> packet != NULL) + { + -- outgoingCommand -> packet -> referenceCount; + + if (outgoingCommand -> packet -> referenceCount == 0) + { + outgoingCommand -> packet -> flags |= ENET_PACKET_FLAG_SENT; + + enet_packet_destroy (outgoingCommand -> packet); + } + } + + enet_free (outgoingCommand); + } while (! enet_list_empty (sentUnreliableCommands)); + + if (peer -> state == ENET_PEER_STATE_DISCONNECT_LATER && + ! enet_peer_has_outgoing_commands (peer)) + enet_peer_disconnect (peer, peer -> eventData); +} + +static ENetOutgoingCommand * +enet_protocol_find_sent_reliable_command (ENetList * list, enet_uint16 reliableSequenceNumber, enet_uint8 channelID) +{ + ENetListIterator currentCommand; + + for (currentCommand = enet_list_begin (list); + currentCommand != enet_list_end (list); + currentCommand = enet_list_next (currentCommand)) + { + ENetOutgoingCommand * outgoingCommand = (ENetOutgoingCommand *) currentCommand; + + if (! 
(outgoingCommand -> command.header.command & ENET_PROTOCOL_COMMAND_FLAG_ACKNOWLEDGE)) + continue; + + if (outgoingCommand -> sendAttempts < 1) + break; + + if (outgoingCommand -> reliableSequenceNumber == reliableSequenceNumber && + outgoingCommand -> command.header.channelID == channelID) + return outgoingCommand; + } + + return NULL; +} + +static ENetProtocolCommand +enet_protocol_remove_sent_reliable_command (ENetPeer * peer, enet_uint16 reliableSequenceNumber, enet_uint8 channelID) +{ + ENetOutgoingCommand * outgoingCommand = NULL; + ENetListIterator currentCommand; + ENetProtocolCommand commandNumber; + int wasSent = 1; + + for (currentCommand = enet_list_begin (& peer -> sentReliableCommands); + currentCommand != enet_list_end (& peer -> sentReliableCommands); + currentCommand = enet_list_next (currentCommand)) + { + outgoingCommand = (ENetOutgoingCommand *) currentCommand; + + if (outgoingCommand -> reliableSequenceNumber == reliableSequenceNumber && + outgoingCommand -> command.header.channelID == channelID) + break; + } + + if (currentCommand == enet_list_end (& peer -> sentReliableCommands)) + { + outgoingCommand = enet_protocol_find_sent_reliable_command (& peer -> outgoingCommands, reliableSequenceNumber, channelID); + if (outgoingCommand == NULL) + outgoingCommand = enet_protocol_find_sent_reliable_command (& peer -> outgoingSendReliableCommands, reliableSequenceNumber, channelID); + + wasSent = 0; + } + + if (outgoingCommand == NULL) + return ENET_PROTOCOL_COMMAND_NONE; + + if (channelID < peer -> channelCount) + { + ENetChannel * channel = & peer -> channels [channelID]; + enet_uint16 reliableWindow = reliableSequenceNumber / ENET_PEER_RELIABLE_WINDOW_SIZE; + if (channel -> reliableWindows [reliableWindow] > 0) + { + -- channel -> reliableWindows [reliableWindow]; + if (! 
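/* Each entry of reliableWindows[] counts reliable commands still awaiting acknowledgement in
   that window of sequence numbers; once a window drains to zero its bit is cleared from
   usedReliableWindows, so the sender may later wrap new sequence numbers into it (see the
   windowWrap check in enet_protocol_check_outgoing_commands). */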
channel -> reliableWindows [reliableWindow]) + channel -> usedReliableWindows &= ~ (1u << reliableWindow); + } + } + + commandNumber = (ENetProtocolCommand) (outgoingCommand -> command.header.command & ENET_PROTOCOL_COMMAND_MASK); + + enet_list_remove (& outgoingCommand -> outgoingCommandList); + + if (outgoingCommand -> packet != NULL) + { + if (wasSent) + peer -> reliableDataInTransit -= outgoingCommand -> fragmentLength; + + -- outgoingCommand -> packet -> referenceCount; + + if (outgoingCommand -> packet -> referenceCount == 0) + { + outgoingCommand -> packet -> flags |= ENET_PACKET_FLAG_SENT; + + enet_packet_destroy (outgoingCommand -> packet); + } + } + + enet_free (outgoingCommand); + + if (enet_list_empty (& peer -> sentReliableCommands)) + return commandNumber; + + outgoingCommand = (ENetOutgoingCommand *) enet_list_front (& peer -> sentReliableCommands); + + peer -> nextTimeout = outgoingCommand -> sentTime + outgoingCommand -> roundTripTimeout; + + return commandNumber; +} + +static ENetPeer * +enet_protocol_handle_connect (ENetHost * host, ENetProtocolHeader * header, ENetProtocol * command) +{ + enet_uint8 incomingSessionID, outgoingSessionID; + enet_uint32 mtu, windowSize; + ENetChannel * channel; + size_t channelCount, duplicatePeers = 0; + ENetPeer * currentPeer, * peer = NULL; + ENetProtocol verifyCommand; + + channelCount = ENET_NET_TO_HOST_32 (command -> connect.channelCount); + + if (channelCount < ENET_PROTOCOL_MINIMUM_CHANNEL_COUNT || + channelCount > ENET_PROTOCOL_MAXIMUM_CHANNEL_COUNT) + return NULL; + + for (currentPeer = host -> peers; + currentPeer < & host -> peers [host -> peerCount]; + ++ currentPeer) + { + if (currentPeer -> state == ENET_PEER_STATE_DISCONNECTED) + { + if (peer == NULL) + peer = currentPeer; + } + else + if (currentPeer -> state != ENET_PEER_STATE_CONNECTING && + currentPeer -> address.host == host -> receivedAddress.host) + { + if (currentPeer -> address.port == host -> receivedAddress.port && + currentPeer -> connectID == command -> connect.connectID) + return NULL; + + ++ duplicatePeers; + } + } + + if (peer == NULL || duplicatePeers >= host -> duplicatePeers) + return NULL; + + if (channelCount > host -> channelLimit) + channelCount = host -> channelLimit; + peer -> channels = (ENetChannel *) enet_malloc (channelCount * sizeof (ENetChannel)); + if (peer -> channels == NULL) + return NULL; + peer -> channelCount = channelCount; + peer -> state = ENET_PEER_STATE_ACKNOWLEDGING_CONNECT; + peer -> connectID = command -> connect.connectID; + peer -> address = host -> receivedAddress; + peer -> mtu = host -> mtu; + peer -> outgoingPeerID = ENET_NET_TO_HOST_16 (command -> connect.outgoingPeerID); + peer -> incomingBandwidth = ENET_NET_TO_HOST_32 (command -> connect.incomingBandwidth); + peer -> outgoingBandwidth = ENET_NET_TO_HOST_32 (command -> connect.outgoingBandwidth); + peer -> packetThrottleInterval = ENET_NET_TO_HOST_32 (command -> connect.packetThrottleInterval); + peer -> packetThrottleAcceleration = ENET_NET_TO_HOST_32 (command -> connect.packetThrottleAcceleration); + peer -> packetThrottleDeceleration = ENET_NET_TO_HOST_32 (command -> connect.packetThrottleDeceleration); + peer -> eventData = ENET_NET_TO_HOST_32 (command -> connect.data); + + incomingSessionID = command -> connect.incomingSessionID == 0xFF ? 
peer -> outgoingSessionID : command -> connect.incomingSessionID; + incomingSessionID = (incomingSessionID + 1) & (ENET_PROTOCOL_HEADER_SESSION_MASK >> ENET_PROTOCOL_HEADER_SESSION_SHIFT); + if (incomingSessionID == peer -> outgoingSessionID) + incomingSessionID = (incomingSessionID + 1) & (ENET_PROTOCOL_HEADER_SESSION_MASK >> ENET_PROTOCOL_HEADER_SESSION_SHIFT); + peer -> outgoingSessionID = incomingSessionID; + + outgoingSessionID = command -> connect.outgoingSessionID == 0xFF ? peer -> incomingSessionID : command -> connect.outgoingSessionID; + outgoingSessionID = (outgoingSessionID + 1) & (ENET_PROTOCOL_HEADER_SESSION_MASK >> ENET_PROTOCOL_HEADER_SESSION_SHIFT); + if (outgoingSessionID == peer -> incomingSessionID) + outgoingSessionID = (outgoingSessionID + 1) & (ENET_PROTOCOL_HEADER_SESSION_MASK >> ENET_PROTOCOL_HEADER_SESSION_SHIFT); + peer -> incomingSessionID = outgoingSessionID; + + for (channel = peer -> channels; + channel < & peer -> channels [channelCount]; + ++ channel) + { + channel -> outgoingReliableSequenceNumber = 0; + channel -> outgoingUnreliableSequenceNumber = 0; + channel -> incomingReliableSequenceNumber = 0; + channel -> incomingUnreliableSequenceNumber = 0; + + enet_list_clear (& channel -> incomingReliableCommands); + enet_list_clear (& channel -> incomingUnreliableCommands); + + channel -> usedReliableWindows = 0; + memset (channel -> reliableWindows, 0, sizeof (channel -> reliableWindows)); + } + + mtu = ENET_NET_TO_HOST_32 (command -> connect.mtu); + + if (mtu < ENET_PROTOCOL_MINIMUM_MTU) + mtu = ENET_PROTOCOL_MINIMUM_MTU; + else + if (mtu > ENET_PROTOCOL_MAXIMUM_MTU) + mtu = ENET_PROTOCOL_MAXIMUM_MTU; + + if (mtu < peer -> mtu) + peer -> mtu = mtu; + + if (host -> outgoingBandwidth == 0 && + peer -> incomingBandwidth == 0) + peer -> windowSize = ENET_PROTOCOL_MAXIMUM_WINDOW_SIZE; + else + if (host -> outgoingBandwidth == 0 || + peer -> incomingBandwidth == 0) + peer -> windowSize = (ENET_MAX (host -> outgoingBandwidth, peer -> incomingBandwidth) / + ENET_PEER_WINDOW_SIZE_SCALE) * + ENET_PROTOCOL_MINIMUM_WINDOW_SIZE; + else + peer -> windowSize = (ENET_MIN (host -> outgoingBandwidth, peer -> incomingBandwidth) / + ENET_PEER_WINDOW_SIZE_SCALE) * + ENET_PROTOCOL_MINIMUM_WINDOW_SIZE; + + if (peer -> windowSize < ENET_PROTOCOL_MINIMUM_WINDOW_SIZE) + peer -> windowSize = ENET_PROTOCOL_MINIMUM_WINDOW_SIZE; + else + if (peer -> windowSize > ENET_PROTOCOL_MAXIMUM_WINDOW_SIZE) + peer -> windowSize = ENET_PROTOCOL_MAXIMUM_WINDOW_SIZE; + + if (host -> incomingBandwidth == 0) + windowSize = ENET_PROTOCOL_MAXIMUM_WINDOW_SIZE; + else + windowSize = (host -> incomingBandwidth / ENET_PEER_WINDOW_SIZE_SCALE) * + ENET_PROTOCOL_MINIMUM_WINDOW_SIZE; + + if (windowSize > ENET_NET_TO_HOST_32 (command -> connect.windowSize)) + windowSize = ENET_NET_TO_HOST_32 (command -> connect.windowSize); + + if (windowSize < ENET_PROTOCOL_MINIMUM_WINDOW_SIZE) + windowSize = ENET_PROTOCOL_MINIMUM_WINDOW_SIZE; + else + if (windowSize > ENET_PROTOCOL_MAXIMUM_WINDOW_SIZE) + windowSize = ENET_PROTOCOL_MAXIMUM_WINDOW_SIZE; + + verifyCommand.header.command = ENET_PROTOCOL_COMMAND_VERIFY_CONNECT | ENET_PROTOCOL_COMMAND_FLAG_ACKNOWLEDGE; + verifyCommand.header.channelID = 0xFF; + verifyCommand.verifyConnect.outgoingPeerID = ENET_HOST_TO_NET_16 (peer -> incomingPeerID); + verifyCommand.verifyConnect.incomingSessionID = incomingSessionID; + verifyCommand.verifyConnect.outgoingSessionID = outgoingSessionID; + verifyCommand.verifyConnect.mtu = ENET_HOST_TO_NET_32 (peer -> mtu); + 
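/* The VERIFY_CONNECT command being assembled here echoes the negotiated session IDs, MTU,
   window size, channel count and throttle parameters back to the connecting peer; the new
   connection is only reported once that reply is acknowledged (the
   ENET_PEER_STATE_ACKNOWLEDGING_CONNECT state is resolved in enet_protocol_handle_acknowledge). */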
verifyCommand.verifyConnect.windowSize = ENET_HOST_TO_NET_32 (windowSize); + verifyCommand.verifyConnect.channelCount = ENET_HOST_TO_NET_32 (channelCount); + verifyCommand.verifyConnect.incomingBandwidth = ENET_HOST_TO_NET_32 (host -> incomingBandwidth); + verifyCommand.verifyConnect.outgoingBandwidth = ENET_HOST_TO_NET_32 (host -> outgoingBandwidth); + verifyCommand.verifyConnect.packetThrottleInterval = ENET_HOST_TO_NET_32 (peer -> packetThrottleInterval); + verifyCommand.verifyConnect.packetThrottleAcceleration = ENET_HOST_TO_NET_32 (peer -> packetThrottleAcceleration); + verifyCommand.verifyConnect.packetThrottleDeceleration = ENET_HOST_TO_NET_32 (peer -> packetThrottleDeceleration); + verifyCommand.verifyConnect.connectID = peer -> connectID; + + enet_peer_queue_outgoing_command (peer, & verifyCommand, NULL, 0, 0); + + return peer; +} + +static int +enet_protocol_handle_send_reliable (ENetHost * host, ENetPeer * peer, const ENetProtocol * command, enet_uint8 ** currentData) +{ + size_t dataLength; + + if (command -> header.channelID >= peer -> channelCount || + (peer -> state != ENET_PEER_STATE_CONNECTED && peer -> state != ENET_PEER_STATE_DISCONNECT_LATER)) + return -1; + + dataLength = ENET_NET_TO_HOST_16 (command -> sendReliable.dataLength); + * currentData += dataLength; + if (dataLength > host -> maximumPacketSize || + * currentData < host -> receivedData || + * currentData > & host -> receivedData [host -> receivedDataLength]) + return -1; + + if (enet_peer_queue_incoming_command (peer, command, (const enet_uint8 *) command + sizeof (ENetProtocolSendReliable), dataLength, ENET_PACKET_FLAG_RELIABLE, 0) == NULL) + return -1; + + return 0; +} + +static int +enet_protocol_handle_send_unsequenced (ENetHost * host, ENetPeer * peer, const ENetProtocol * command, enet_uint8 ** currentData) +{ + enet_uint32 unsequencedGroup, index; + size_t dataLength; + + if (command -> header.channelID >= peer -> channelCount || + (peer -> state != ENET_PEER_STATE_CONNECTED && peer -> state != ENET_PEER_STATE_DISCONNECT_LATER)) + return -1; + + dataLength = ENET_NET_TO_HOST_16 (command -> sendUnsequenced.dataLength); + * currentData += dataLength; + if (dataLength > host -> maximumPacketSize || + * currentData < host -> receivedData || + * currentData > & host -> receivedData [host -> receivedDataLength]) + return -1; + + unsequencedGroup = ENET_NET_TO_HOST_16 (command -> sendUnsequenced.unsequencedGroup); + index = unsequencedGroup % ENET_PEER_UNSEQUENCED_WINDOW_SIZE; + + if (unsequencedGroup < peer -> incomingUnsequencedGroup) + unsequencedGroup += 0x10000; + + if (unsequencedGroup >= (enet_uint32) peer -> incomingUnsequencedGroup + ENET_PEER_FREE_UNSEQUENCED_WINDOWS * ENET_PEER_UNSEQUENCED_WINDOW_SIZE) + return 0; + + unsequencedGroup &= 0xFFFF; + + if (unsequencedGroup - index != peer -> incomingUnsequencedGroup) + { + peer -> incomingUnsequencedGroup = unsequencedGroup - index; + + memset (peer -> unsequencedWindow, 0, sizeof (peer -> unsequencedWindow)); + } + else + if (peer -> unsequencedWindow [index / 32] & (1u << (index % 32))) + return 0; + + if (enet_peer_queue_incoming_command (peer, command, (const enet_uint8 *) command + sizeof (ENetProtocolSendUnsequenced), dataLength, ENET_PACKET_FLAG_UNSEQUENCED, 0) == NULL) + return -1; + + peer -> unsequencedWindow [index / 32] |= 1u << (index % 32); + + return 0; +} + +static int +enet_protocol_handle_send_unreliable (ENetHost * host, ENetPeer * peer, const ENetProtocol * command, enet_uint8 ** currentData) +{ + size_t dataLength; + + if (command 
-> header.channelID >= peer -> channelCount || + (peer -> state != ENET_PEER_STATE_CONNECTED && peer -> state != ENET_PEER_STATE_DISCONNECT_LATER)) + return -1; + + dataLength = ENET_NET_TO_HOST_16 (command -> sendUnreliable.dataLength); + * currentData += dataLength; + if (dataLength > host -> maximumPacketSize || + * currentData < host -> receivedData || + * currentData > & host -> receivedData [host -> receivedDataLength]) + return -1; + + if (enet_peer_queue_incoming_command (peer, command, (const enet_uint8 *) command + sizeof (ENetProtocolSendUnreliable), dataLength, 0, 0) == NULL) + return -1; + + return 0; +} + +static int +enet_protocol_handle_send_fragment (ENetHost * host, ENetPeer * peer, const ENetProtocol * command, enet_uint8 ** currentData) +{ + enet_uint32 fragmentNumber, + fragmentCount, + fragmentOffset, + fragmentLength, + startSequenceNumber, + totalLength; + ENetChannel * channel; + enet_uint16 startWindow, currentWindow; + ENetListIterator currentCommand; + ENetIncomingCommand * startCommand = NULL; + + if (command -> header.channelID >= peer -> channelCount || + (peer -> state != ENET_PEER_STATE_CONNECTED && peer -> state != ENET_PEER_STATE_DISCONNECT_LATER)) + return -1; + + fragmentLength = ENET_NET_TO_HOST_16 (command -> sendFragment.dataLength); + * currentData += fragmentLength; + if (fragmentLength <= 0 || + fragmentLength > host -> maximumPacketSize || + * currentData < host -> receivedData || + * currentData > & host -> receivedData [host -> receivedDataLength]) + return -1; + + channel = & peer -> channels [command -> header.channelID]; + startSequenceNumber = ENET_NET_TO_HOST_16 (command -> sendFragment.startSequenceNumber); + startWindow = startSequenceNumber / ENET_PEER_RELIABLE_WINDOW_SIZE; + currentWindow = channel -> incomingReliableSequenceNumber / ENET_PEER_RELIABLE_WINDOW_SIZE; + + if (startSequenceNumber < channel -> incomingReliableSequenceNumber) + startWindow += ENET_PEER_RELIABLE_WINDOWS; + + if (startWindow < currentWindow || startWindow >= currentWindow + ENET_PEER_FREE_RELIABLE_WINDOWS - 1) + return 0; + + fragmentNumber = ENET_NET_TO_HOST_32 (command -> sendFragment.fragmentNumber); + fragmentCount = ENET_NET_TO_HOST_32 (command -> sendFragment.fragmentCount); + fragmentOffset = ENET_NET_TO_HOST_32 (command -> sendFragment.fragmentOffset); + totalLength = ENET_NET_TO_HOST_32 (command -> sendFragment.totalLength); + + if (fragmentCount > ENET_PROTOCOL_MAXIMUM_FRAGMENT_COUNT || + fragmentNumber >= fragmentCount || + totalLength > host -> maximumPacketSize || + totalLength < fragmentCount || + fragmentOffset >= totalLength || + fragmentLength > totalLength - fragmentOffset) + return -1; + + for (currentCommand = enet_list_previous (enet_list_end (& channel -> incomingReliableCommands)); + currentCommand != enet_list_end (& channel -> incomingReliableCommands); + currentCommand = enet_list_previous (currentCommand)) + { + ENetIncomingCommand * incomingCommand = (ENetIncomingCommand *) currentCommand; + + if (startSequenceNumber >= channel -> incomingReliableSequenceNumber) + { + if (incomingCommand -> reliableSequenceNumber < channel -> incomingReliableSequenceNumber) + continue; + } + else + if (incomingCommand -> reliableSequenceNumber >= channel -> incomingReliableSequenceNumber) + break; + + if (incomingCommand -> reliableSequenceNumber <= startSequenceNumber) + { + if (incomingCommand -> reliableSequenceNumber < startSequenceNumber) + break; + + if ((incomingCommand -> command.header.command & ENET_PROTOCOL_COMMAND_MASK) != 
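/* Fragment reassembly: every fragment of a reliable packet carries the same startSequenceNumber.
   The first fragment to arrive allocates the full totalLength packet plus a fragment bitmask;
   each fragment then marks its bit, copies its slice into place at fragmentOffset, and the packet
   is dispatched once fragmentsRemaining reaches zero. */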
ENET_PROTOCOL_COMMAND_SEND_FRAGMENT || + totalLength != incomingCommand -> packet -> dataLength || + fragmentCount != incomingCommand -> fragmentCount) + return -1; + + startCommand = incomingCommand; + break; + } + } + + if (startCommand == NULL) + { + ENetProtocol hostCommand = * command; + + hostCommand.header.reliableSequenceNumber = startSequenceNumber; + + startCommand = enet_peer_queue_incoming_command (peer, & hostCommand, NULL, totalLength, ENET_PACKET_FLAG_RELIABLE, fragmentCount); + if (startCommand == NULL) + return -1; + } + + if ((startCommand -> fragments [fragmentNumber / 32] & (1u << (fragmentNumber % 32))) == 0) + { + -- startCommand -> fragmentsRemaining; + + startCommand -> fragments [fragmentNumber / 32] |= (1u << (fragmentNumber % 32)); + + if (fragmentOffset + fragmentLength > startCommand -> packet -> dataLength) + fragmentLength = startCommand -> packet -> dataLength - fragmentOffset; + + memcpy (startCommand -> packet -> data + fragmentOffset, + (enet_uint8 *) command + sizeof (ENetProtocolSendFragment), + fragmentLength); + + if (startCommand -> fragmentsRemaining <= 0) + enet_peer_dispatch_incoming_reliable_commands (peer, channel, NULL); + } + + return 0; +} + +static int +enet_protocol_handle_send_unreliable_fragment (ENetHost * host, ENetPeer * peer, const ENetProtocol * command, enet_uint8 ** currentData) +{ + enet_uint32 fragmentNumber, + fragmentCount, + fragmentOffset, + fragmentLength, + reliableSequenceNumber, + startSequenceNumber, + totalLength; + enet_uint16 reliableWindow, currentWindow; + ENetChannel * channel; + ENetListIterator currentCommand; + ENetIncomingCommand * startCommand = NULL; + + if (command -> header.channelID >= peer -> channelCount || + (peer -> state != ENET_PEER_STATE_CONNECTED && peer -> state != ENET_PEER_STATE_DISCONNECT_LATER)) + return -1; + + fragmentLength = ENET_NET_TO_HOST_16 (command -> sendFragment.dataLength); + * currentData += fragmentLength; + if (fragmentLength > host -> maximumPacketSize || + * currentData < host -> receivedData || + * currentData > & host -> receivedData [host -> receivedDataLength]) + return -1; + + channel = & peer -> channels [command -> header.channelID]; + reliableSequenceNumber = command -> header.reliableSequenceNumber; + startSequenceNumber = ENET_NET_TO_HOST_16 (command -> sendFragment.startSequenceNumber); + + reliableWindow = reliableSequenceNumber / ENET_PEER_RELIABLE_WINDOW_SIZE; + currentWindow = channel -> incomingReliableSequenceNumber / ENET_PEER_RELIABLE_WINDOW_SIZE; + + if (reliableSequenceNumber < channel -> incomingReliableSequenceNumber) + reliableWindow += ENET_PEER_RELIABLE_WINDOWS; + + if (reliableWindow < currentWindow || reliableWindow >= currentWindow + ENET_PEER_FREE_RELIABLE_WINDOWS - 1) + return 0; + + if (reliableSequenceNumber == channel -> incomingReliableSequenceNumber && + startSequenceNumber <= channel -> incomingUnreliableSequenceNumber) + return 0; + + fragmentNumber = ENET_NET_TO_HOST_32 (command -> sendFragment.fragmentNumber); + fragmentCount = ENET_NET_TO_HOST_32 (command -> sendFragment.fragmentCount); + fragmentOffset = ENET_NET_TO_HOST_32 (command -> sendFragment.fragmentOffset); + totalLength = ENET_NET_TO_HOST_32 (command -> sendFragment.totalLength); + + if (fragmentCount > ENET_PROTOCOL_MAXIMUM_FRAGMENT_COUNT || + fragmentNumber >= fragmentCount || + totalLength > host -> maximumPacketSize || + fragmentOffset >= totalLength || + fragmentLength > totalLength - fragmentOffset) + return -1; + + for (currentCommand = enet_list_previous (enet_list_end 
(& channel -> incomingUnreliableCommands)); + currentCommand != enet_list_end (& channel -> incomingUnreliableCommands); + currentCommand = enet_list_previous (currentCommand)) + { + ENetIncomingCommand * incomingCommand = (ENetIncomingCommand *) currentCommand; + + if (reliableSequenceNumber >= channel -> incomingReliableSequenceNumber) + { + if (incomingCommand -> reliableSequenceNumber < channel -> incomingReliableSequenceNumber) + continue; + } + else + if (incomingCommand -> reliableSequenceNumber >= channel -> incomingReliableSequenceNumber) + break; + + if (incomingCommand -> reliableSequenceNumber < reliableSequenceNumber) + break; + + if (incomingCommand -> reliableSequenceNumber > reliableSequenceNumber) + continue; + + if (incomingCommand -> unreliableSequenceNumber <= startSequenceNumber) + { + if (incomingCommand -> unreliableSequenceNumber < startSequenceNumber) + break; + + if ((incomingCommand -> command.header.command & ENET_PROTOCOL_COMMAND_MASK) != ENET_PROTOCOL_COMMAND_SEND_UNRELIABLE_FRAGMENT || + totalLength != incomingCommand -> packet -> dataLength || + fragmentCount != incomingCommand -> fragmentCount) + return -1; + + startCommand = incomingCommand; + break; + } + } + + if (startCommand == NULL) + { + startCommand = enet_peer_queue_incoming_command (peer, command, NULL, totalLength, ENET_PACKET_FLAG_UNRELIABLE_FRAGMENT, fragmentCount); + if (startCommand == NULL) + return -1; + } + + if ((startCommand -> fragments [fragmentNumber / 32] & (1u << (fragmentNumber % 32))) == 0) + { + -- startCommand -> fragmentsRemaining; + + startCommand -> fragments [fragmentNumber / 32] |= (1u << (fragmentNumber % 32)); + + if (fragmentOffset + fragmentLength > startCommand -> packet -> dataLength) + fragmentLength = startCommand -> packet -> dataLength - fragmentOffset; + + memcpy (startCommand -> packet -> data + fragmentOffset, + (enet_uint8 *) command + sizeof (ENetProtocolSendFragment), + fragmentLength); + + if (startCommand -> fragmentsRemaining <= 0) + enet_peer_dispatch_incoming_unreliable_commands (peer, channel, NULL); + } + + return 0; +} + +static int +enet_protocol_handle_ping (ENetHost * host, ENetPeer * peer, const ENetProtocol * command) +{ + if (peer -> state != ENET_PEER_STATE_CONNECTED && peer -> state != ENET_PEER_STATE_DISCONNECT_LATER) + return -1; + + return 0; +} + +static int +enet_protocol_handle_bandwidth_limit (ENetHost * host, ENetPeer * peer, const ENetProtocol * command) +{ + if (peer -> state != ENET_PEER_STATE_CONNECTED && peer -> state != ENET_PEER_STATE_DISCONNECT_LATER) + return -1; + + if (peer -> incomingBandwidth != 0) + -- host -> bandwidthLimitedPeers; + + peer -> incomingBandwidth = ENET_NET_TO_HOST_32 (command -> bandwidthLimit.incomingBandwidth); + peer -> outgoingBandwidth = ENET_NET_TO_HOST_32 (command -> bandwidthLimit.outgoingBandwidth); + + if (peer -> incomingBandwidth != 0) + ++ host -> bandwidthLimitedPeers; + + if (peer -> incomingBandwidth == 0 && host -> outgoingBandwidth == 0) + peer -> windowSize = ENET_PROTOCOL_MAXIMUM_WINDOW_SIZE; + else + if (peer -> incomingBandwidth == 0 || host -> outgoingBandwidth == 0) + peer -> windowSize = (ENET_MAX (peer -> incomingBandwidth, host -> outgoingBandwidth) / + ENET_PEER_WINDOW_SIZE_SCALE) * ENET_PROTOCOL_MINIMUM_WINDOW_SIZE; + else + peer -> windowSize = (ENET_MIN (peer -> incomingBandwidth, host -> outgoingBandwidth) / + ENET_PEER_WINDOW_SIZE_SCALE) * ENET_PROTOCOL_MINIMUM_WINDOW_SIZE; + + if (peer -> windowSize < ENET_PROTOCOL_MINIMUM_WINDOW_SIZE) + peer -> windowSize = 
ENET_PROTOCOL_MINIMUM_WINDOW_SIZE; + else + if (peer -> windowSize > ENET_PROTOCOL_MAXIMUM_WINDOW_SIZE) + peer -> windowSize = ENET_PROTOCOL_MAXIMUM_WINDOW_SIZE; + + return 0; +} + +static int +enet_protocol_handle_throttle_configure (ENetHost * host, ENetPeer * peer, const ENetProtocol * command) +{ + if (peer -> state != ENET_PEER_STATE_CONNECTED && peer -> state != ENET_PEER_STATE_DISCONNECT_LATER) + return -1; + + peer -> packetThrottleInterval = ENET_NET_TO_HOST_32 (command -> throttleConfigure.packetThrottleInterval); + peer -> packetThrottleAcceleration = ENET_NET_TO_HOST_32 (command -> throttleConfigure.packetThrottleAcceleration); + peer -> packetThrottleDeceleration = ENET_NET_TO_HOST_32 (command -> throttleConfigure.packetThrottleDeceleration); + + return 0; +} + +static int +enet_protocol_handle_disconnect (ENetHost * host, ENetPeer * peer, const ENetProtocol * command) +{ + if (peer -> state == ENET_PEER_STATE_DISCONNECTED || peer -> state == ENET_PEER_STATE_ZOMBIE || peer -> state == ENET_PEER_STATE_ACKNOWLEDGING_DISCONNECT) + return 0; + + enet_peer_reset_queues (peer); + + if (peer -> state == ENET_PEER_STATE_CONNECTION_SUCCEEDED || peer -> state == ENET_PEER_STATE_DISCONNECTING || peer -> state == ENET_PEER_STATE_CONNECTING) + enet_protocol_dispatch_state (host, peer, ENET_PEER_STATE_ZOMBIE); + else + if (peer -> state != ENET_PEER_STATE_CONNECTED && peer -> state != ENET_PEER_STATE_DISCONNECT_LATER) + { + if (peer -> state == ENET_PEER_STATE_CONNECTION_PENDING) host -> recalculateBandwidthLimits = 1; + + enet_peer_reset (peer); + } + else + if (command -> header.command & ENET_PROTOCOL_COMMAND_FLAG_ACKNOWLEDGE) + enet_protocol_change_state (host, peer, ENET_PEER_STATE_ACKNOWLEDGING_DISCONNECT); + else + enet_protocol_dispatch_state (host, peer, ENET_PEER_STATE_ZOMBIE); + + if (peer -> state != ENET_PEER_STATE_DISCONNECTED) + peer -> eventData = ENET_NET_TO_HOST_32 (command -> disconnect.data); + + return 0; +} + +static int +enet_protocol_handle_acknowledge (ENetHost * host, ENetEvent * event, ENetPeer * peer, const ENetProtocol * command) +{ + enet_uint32 roundTripTime, + receivedSentTime, + receivedReliableSequenceNumber; + ENetProtocolCommand commandNumber; + + if (peer -> state == ENET_PEER_STATE_DISCONNECTED || peer -> state == ENET_PEER_STATE_ZOMBIE) + return 0; + + receivedSentTime = ENET_NET_TO_HOST_16 (command -> acknowledge.receivedSentTime); + receivedSentTime |= host -> serviceTime & 0xFFFF0000; + if ((receivedSentTime & 0x8000) > (host -> serviceTime & 0x8000)) + receivedSentTime -= 0x10000; + + if (ENET_TIME_LESS (host -> serviceTime, receivedSentTime)) + return 0; + + roundTripTime = ENET_TIME_DIFFERENCE (host -> serviceTime, receivedSentTime); + roundTripTime = ENET_MAX (roundTripTime, 1); + + if (peer -> lastReceiveTime > 0) + { + enet_peer_throttle (peer, roundTripTime); + + peer -> roundTripTimeVariance -= peer -> roundTripTimeVariance / 4; + + if (roundTripTime >= peer -> roundTripTime) + { + enet_uint32 diff = roundTripTime - peer -> roundTripTime; + peer -> roundTripTimeVariance += diff / 4; + peer -> roundTripTime += diff / 8; + } + else + { + enet_uint32 diff = peer -> roundTripTime - roundTripTime; + peer -> roundTripTimeVariance += diff / 4; + peer -> roundTripTime -= diff / 8; + } + } + else + { + peer -> roundTripTime = roundTripTime; + peer -> roundTripTimeVariance = (roundTripTime + 1) / 2; + } + + if (peer -> roundTripTime < peer -> lowestRoundTripTime) + peer -> lowestRoundTripTime = peer -> roundTripTime; + + if (peer -> 
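/* The block above keeps an exponentially smoothed round-trip-time estimate, much like TCP's
   SRTT/RTTVAR: the mean moves by 1/8 of the measured error and the variance estimate by 1/4 of it.
   The per-command retransmission timeout is later derived from
   roundTripTime + 4 * roundTripTimeVariance in enet_protocol_check_outgoing_commands. */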
roundTripTimeVariance > peer -> highestRoundTripTimeVariance) + peer -> highestRoundTripTimeVariance = peer -> roundTripTimeVariance; + + if (peer -> packetThrottleEpoch == 0 || + ENET_TIME_DIFFERENCE (host -> serviceTime, peer -> packetThrottleEpoch) >= peer -> packetThrottleInterval) + { + peer -> lastRoundTripTime = peer -> lowestRoundTripTime; + peer -> lastRoundTripTimeVariance = ENET_MAX (peer -> highestRoundTripTimeVariance, 1); + peer -> lowestRoundTripTime = peer -> roundTripTime; + peer -> highestRoundTripTimeVariance = peer -> roundTripTimeVariance; + peer -> packetThrottleEpoch = host -> serviceTime; + } + + peer -> lastReceiveTime = ENET_MAX (host -> serviceTime, 1); + peer -> earliestTimeout = 0; + + receivedReliableSequenceNumber = ENET_NET_TO_HOST_16 (command -> acknowledge.receivedReliableSequenceNumber); + + commandNumber = enet_protocol_remove_sent_reliable_command (peer, receivedReliableSequenceNumber, command -> header.channelID); + + switch (peer -> state) + { + case ENET_PEER_STATE_ACKNOWLEDGING_CONNECT: + if (commandNumber != ENET_PROTOCOL_COMMAND_VERIFY_CONNECT) + return -1; + + enet_protocol_notify_connect (host, peer, event); + break; + + case ENET_PEER_STATE_DISCONNECTING: + if (commandNumber != ENET_PROTOCOL_COMMAND_DISCONNECT) + return -1; + + enet_protocol_notify_disconnect (host, peer, event); + break; + + case ENET_PEER_STATE_DISCONNECT_LATER: + if (! enet_peer_has_outgoing_commands (peer)) + enet_peer_disconnect (peer, peer -> eventData); + break; + + default: + break; + } + + return 0; +} + +static int +enet_protocol_handle_verify_connect (ENetHost * host, ENetEvent * event, ENetPeer * peer, const ENetProtocol * command) +{ + enet_uint32 mtu, windowSize; + size_t channelCount; + + if (peer -> state != ENET_PEER_STATE_CONNECTING) + return 0; + + channelCount = ENET_NET_TO_HOST_32 (command -> verifyConnect.channelCount); + + if (channelCount < ENET_PROTOCOL_MINIMUM_CHANNEL_COUNT || channelCount > ENET_PROTOCOL_MAXIMUM_CHANNEL_COUNT || + ENET_NET_TO_HOST_32 (command -> verifyConnect.packetThrottleInterval) != peer -> packetThrottleInterval || + ENET_NET_TO_HOST_32 (command -> verifyConnect.packetThrottleAcceleration) != peer -> packetThrottleAcceleration || + ENET_NET_TO_HOST_32 (command -> verifyConnect.packetThrottleDeceleration) != peer -> packetThrottleDeceleration || + command -> verifyConnect.connectID != peer -> connectID) + { + peer -> eventData = 0; + + enet_protocol_dispatch_state (host, peer, ENET_PEER_STATE_ZOMBIE); + + return -1; + } + + enet_protocol_remove_sent_reliable_command (peer, 1, 0xFF); + + if (channelCount < peer -> channelCount) + peer -> channelCount = channelCount; + + peer -> outgoingPeerID = ENET_NET_TO_HOST_16 (command -> verifyConnect.outgoingPeerID); + peer -> incomingSessionID = command -> verifyConnect.incomingSessionID; + peer -> outgoingSessionID = command -> verifyConnect.outgoingSessionID; + + mtu = ENET_NET_TO_HOST_32 (command -> verifyConnect.mtu); + + if (mtu < ENET_PROTOCOL_MINIMUM_MTU) + mtu = ENET_PROTOCOL_MINIMUM_MTU; + else + if (mtu > ENET_PROTOCOL_MAXIMUM_MTU) + mtu = ENET_PROTOCOL_MAXIMUM_MTU; + + if (mtu < peer -> mtu) + peer -> mtu = mtu; + + windowSize = ENET_NET_TO_HOST_32 (command -> verifyConnect.windowSize); + + if (windowSize < ENET_PROTOCOL_MINIMUM_WINDOW_SIZE) + windowSize = ENET_PROTOCOL_MINIMUM_WINDOW_SIZE; + + if (windowSize > ENET_PROTOCOL_MAXIMUM_WINDOW_SIZE) + windowSize = ENET_PROTOCOL_MAXIMUM_WINDOW_SIZE; + + if (windowSize < peer -> windowSize) + peer -> windowSize = windowSize; + + peer -> 
incomingBandwidth = ENET_NET_TO_HOST_32 (command -> verifyConnect.incomingBandwidth); + peer -> outgoingBandwidth = ENET_NET_TO_HOST_32 (command -> verifyConnect.outgoingBandwidth); + + enet_protocol_notify_connect (host, peer, event); + return 0; +} + +static int +enet_protocol_handle_incoming_commands (ENetHost * host, ENetEvent * event) +{ + ENetProtocolHeader * header; + ENetProtocol * command; + ENetPeer * peer; + enet_uint8 * currentData; + size_t headerSize; + enet_uint16 peerID, flags; + enet_uint8 sessionID; + + if (host -> receivedDataLength < (size_t) & ((ENetProtocolHeader *) 0) -> sentTime) + return 0; + + header = (ENetProtocolHeader *) host -> receivedData; + + peerID = ENET_NET_TO_HOST_16 (header -> peerID); + sessionID = (peerID & ENET_PROTOCOL_HEADER_SESSION_MASK) >> ENET_PROTOCOL_HEADER_SESSION_SHIFT; + flags = peerID & ENET_PROTOCOL_HEADER_FLAG_MASK; + peerID &= ~ (ENET_PROTOCOL_HEADER_FLAG_MASK | ENET_PROTOCOL_HEADER_SESSION_MASK); + + headerSize = (flags & ENET_PROTOCOL_HEADER_FLAG_SENT_TIME ? sizeof (ENetProtocolHeader) : (size_t) & ((ENetProtocolHeader *) 0) -> sentTime); + if (host -> checksum != NULL) + headerSize += sizeof (enet_uint32); + + if (peerID == ENET_PROTOCOL_MAXIMUM_PEER_ID) + peer = NULL; + else + if (peerID >= host -> peerCount) + return 0; + else + { + peer = & host -> peers [peerID]; + + if (peer -> state == ENET_PEER_STATE_DISCONNECTED || + peer -> state == ENET_PEER_STATE_ZOMBIE || + ((host -> receivedAddress.host != peer -> address.host || + host -> receivedAddress.port != peer -> address.port) && + peer -> address.host != ENET_HOST_BROADCAST) || + (peer -> outgoingPeerID < ENET_PROTOCOL_MAXIMUM_PEER_ID && + sessionID != peer -> incomingSessionID)) + return 0; + } + + if (flags & ENET_PROTOCOL_HEADER_FLAG_COMPRESSED) + { + size_t originalSize; + if (host -> compressor.context == NULL || host -> compressor.decompress == NULL) + return 0; + + originalSize = host -> compressor.decompress (host -> compressor.context, + host -> receivedData + headerSize, + host -> receivedDataLength - headerSize, + host -> packetData [1] + headerSize, + sizeof (host -> packetData [1]) - headerSize); + if (originalSize <= 0 || originalSize > sizeof (host -> packetData [1]) - headerSize) + return 0; + + memcpy (host -> packetData [1], header, headerSize); + host -> receivedData = host -> packetData [1]; + host -> receivedDataLength = headerSize + originalSize; + } + + if (host -> checksum != NULL) + { + enet_uint32 * checksum = (enet_uint32 *) & host -> receivedData [headerSize - sizeof (enet_uint32)]; + enet_uint32 desiredChecksum, newChecksum; + ENetBuffer buffer; + /* Checksum may be an unaligned pointer, use memcpy to avoid undefined behaviour. */ + memcpy (& desiredChecksum, checksum, sizeof (enet_uint32)); + + newChecksum = peer != NULL ? 
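/* Before re-running the checksum, the checksum slot in the received header is overwritten with
   the peer's connectID (or 0 when the packet is not yet tied to a peer); the recomputed value can
   therefore only match the transmitted desiredChecksum if the sender filled that slot the same
   way, which binds the datagram to this connection in addition to protecting its contents. */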
peer -> connectID : 0; + memcpy (checksum, & newChecksum, sizeof (enet_uint32)); + + buffer.data = host -> receivedData; + buffer.dataLength = host -> receivedDataLength; + + if (host -> checksum (& buffer, 1) != desiredChecksum) + return 0; + } + + if (peer != NULL) + { + peer -> address.host = host -> receivedAddress.host; + peer -> address.port = host -> receivedAddress.port; + peer -> incomingDataTotal += host -> receivedDataLength; + } + + currentData = host -> receivedData + headerSize; + + while (currentData < & host -> receivedData [host -> receivedDataLength]) + { + enet_uint8 commandNumber; + size_t commandSize; + + command = (ENetProtocol *) currentData; + + if (currentData + sizeof (ENetProtocolCommandHeader) > & host -> receivedData [host -> receivedDataLength]) + break; + + commandNumber = command -> header.command & ENET_PROTOCOL_COMMAND_MASK; + if (commandNumber >= ENET_PROTOCOL_COMMAND_COUNT) + break; + + commandSize = commandSizes [commandNumber]; + if (commandSize == 0 || currentData + commandSize > & host -> receivedData [host -> receivedDataLength]) + break; + + currentData += commandSize; + + if (peer == NULL && commandNumber != ENET_PROTOCOL_COMMAND_CONNECT) + break; + + command -> header.reliableSequenceNumber = ENET_NET_TO_HOST_16 (command -> header.reliableSequenceNumber); + + switch (commandNumber) + { + case ENET_PROTOCOL_COMMAND_ACKNOWLEDGE: + if (enet_protocol_handle_acknowledge (host, event, peer, command)) + goto commandError; + break; + + case ENET_PROTOCOL_COMMAND_CONNECT: + if (peer != NULL) + goto commandError; + peer = enet_protocol_handle_connect (host, header, command); + if (peer == NULL) + goto commandError; + break; + + case ENET_PROTOCOL_COMMAND_VERIFY_CONNECT: + if (enet_protocol_handle_verify_connect (host, event, peer, command)) + goto commandError; + break; + + case ENET_PROTOCOL_COMMAND_DISCONNECT: + if (enet_protocol_handle_disconnect (host, peer, command)) + goto commandError; + break; + + case ENET_PROTOCOL_COMMAND_PING: + if (enet_protocol_handle_ping (host, peer, command)) + goto commandError; + break; + + case ENET_PROTOCOL_COMMAND_SEND_RELIABLE: + if (enet_protocol_handle_send_reliable (host, peer, command, & currentData)) + goto commandError; + break; + + case ENET_PROTOCOL_COMMAND_SEND_UNRELIABLE: + if (enet_protocol_handle_send_unreliable (host, peer, command, & currentData)) + goto commandError; + break; + + case ENET_PROTOCOL_COMMAND_SEND_UNSEQUENCED: + if (enet_protocol_handle_send_unsequenced (host, peer, command, & currentData)) + goto commandError; + break; + + case ENET_PROTOCOL_COMMAND_SEND_FRAGMENT: + if (enet_protocol_handle_send_fragment (host, peer, command, & currentData)) + goto commandError; + break; + + case ENET_PROTOCOL_COMMAND_BANDWIDTH_LIMIT: + if (enet_protocol_handle_bandwidth_limit (host, peer, command)) + goto commandError; + break; + + case ENET_PROTOCOL_COMMAND_THROTTLE_CONFIGURE: + if (enet_protocol_handle_throttle_configure (host, peer, command)) + goto commandError; + break; + + case ENET_PROTOCOL_COMMAND_SEND_UNRELIABLE_FRAGMENT: + if (enet_protocol_handle_send_unreliable_fragment (host, peer, command, & currentData)) + goto commandError; + break; + + default: + goto commandError; + } + + if (peer != NULL && + (command -> header.command & ENET_PROTOCOL_COMMAND_FLAG_ACKNOWLEDGE) != 0) + { + enet_uint16 sentTime; + + if (! 
(flags & ENET_PROTOCOL_HEADER_FLAG_SENT_TIME)) + break; + + sentTime = ENET_NET_TO_HOST_16 (header -> sentTime); + + switch (peer -> state) + { + case ENET_PEER_STATE_DISCONNECTING: + case ENET_PEER_STATE_ACKNOWLEDGING_CONNECT: + case ENET_PEER_STATE_DISCONNECTED: + case ENET_PEER_STATE_ZOMBIE: + break; + + case ENET_PEER_STATE_ACKNOWLEDGING_DISCONNECT: + if ((command -> header.command & ENET_PROTOCOL_COMMAND_MASK) == ENET_PROTOCOL_COMMAND_DISCONNECT) + enet_peer_queue_acknowledgement (peer, command, sentTime); + break; + + default: + enet_peer_queue_acknowledgement (peer, command, sentTime); + break; + } + } + } + +commandError: + if (event != NULL && event -> type != ENET_EVENT_TYPE_NONE) + return 1; + + return 0; +} + +static int +enet_protocol_receive_incoming_commands (ENetHost * host, ENetEvent * event) +{ + int packets; + + for (packets = 0; packets < 256; ++ packets) + { + int receivedLength; + ENetBuffer buffer; + + buffer.data = host -> packetData [0]; + buffer.dataLength = sizeof (host -> packetData [0]); + + receivedLength = enet_socket_receive (host -> socket, + & host -> receivedAddress, + & buffer, + 1); + + if (receivedLength == -2) + continue; + + if (receivedLength < 0) + return -1; + + if (receivedLength == 0) + return 0; + + host -> receivedData = host -> packetData [0]; + host -> receivedDataLength = receivedLength; + + host -> totalReceivedData += receivedLength; + host -> totalReceivedPackets ++; + + if (host -> intercept != NULL) + { + switch (host -> intercept (host, event)) + { + case 1: + if (event != NULL && event -> type != ENET_EVENT_TYPE_NONE) + return 1; + + continue; + + case -1: + return -1; + + default: + break; + } + } + + switch (enet_protocol_handle_incoming_commands (host, event)) + { + case 1: + return 1; + + case -1: + return -1; + + default: + break; + } + } + + return 0; +} + +static void +enet_protocol_send_acknowledgements (ENetHost * host, ENetPeer * peer) +{ + ENetProtocol * command = & host -> commands [host -> commandCount]; + ENetBuffer * buffer = & host -> buffers [host -> bufferCount]; + ENetAcknowledgement * acknowledgement; + ENetListIterator currentAcknowledgement; + enet_uint16 reliableSequenceNumber; + + currentAcknowledgement = enet_list_begin (& peer -> acknowledgements); + + while (currentAcknowledgement != enet_list_end (& peer -> acknowledgements)) + { + if (command >= & host -> commands [sizeof (host -> commands) / sizeof (ENetProtocol)] || + buffer >= & host -> buffers [sizeof (host -> buffers) / sizeof (ENetBuffer)] || + peer -> mtu - host -> packetSize < sizeof (ENetProtocolAcknowledge)) + { + peer -> flags |= ENET_PEER_FLAG_CONTINUE_SENDING; + + break; + } + + acknowledgement = (ENetAcknowledgement *) currentAcknowledgement; + + currentAcknowledgement = enet_list_next (currentAcknowledgement); + + buffer -> data = command; + buffer -> dataLength = sizeof (ENetProtocolAcknowledge); + + host -> packetSize += buffer -> dataLength; + + reliableSequenceNumber = ENET_HOST_TO_NET_16 (acknowledgement -> command.header.reliableSequenceNumber); + + command -> header.command = ENET_PROTOCOL_COMMAND_ACKNOWLEDGE; + command -> header.channelID = acknowledgement -> command.header.channelID; + command -> header.reliableSequenceNumber = reliableSequenceNumber; + command -> acknowledge.receivedReliableSequenceNumber = reliableSequenceNumber; + command -> acknowledge.receivedSentTime = ENET_HOST_TO_NET_16 (acknowledgement -> sentTime); + + if ((acknowledgement -> command.header.command & ENET_PROTOCOL_COMMAND_MASK) == 
ENET_PROTOCOL_COMMAND_DISCONNECT) + enet_protocol_dispatch_state (host, peer, ENET_PEER_STATE_ZOMBIE); + + enet_list_remove (& acknowledgement -> acknowledgementList); + enet_free (acknowledgement); + + ++ command; + ++ buffer; + } + + host -> commandCount = command - host -> commands; + host -> bufferCount = buffer - host -> buffers; +} + +static int +enet_protocol_check_timeouts (ENetHost * host, ENetPeer * peer, ENetEvent * event) +{ + ENetOutgoingCommand * outgoingCommand; + ENetListIterator currentCommand, insertPosition, insertSendReliablePosition; + + currentCommand = enet_list_begin (& peer -> sentReliableCommands); + insertPosition = enet_list_begin (& peer -> outgoingCommands); + insertSendReliablePosition = enet_list_begin (& peer -> outgoingSendReliableCommands); + + while (currentCommand != enet_list_end (& peer -> sentReliableCommands)) + { + outgoingCommand = (ENetOutgoingCommand *) currentCommand; + + currentCommand = enet_list_next (currentCommand); + + if (ENET_TIME_DIFFERENCE (host -> serviceTime, outgoingCommand -> sentTime) < outgoingCommand -> roundTripTimeout) + continue; + + if (peer -> earliestTimeout == 0 || + ENET_TIME_LESS (outgoingCommand -> sentTime, peer -> earliestTimeout)) + peer -> earliestTimeout = outgoingCommand -> sentTime; + + if (peer -> earliestTimeout != 0 && + (ENET_TIME_DIFFERENCE (host -> serviceTime, peer -> earliestTimeout) >= peer -> timeoutMaximum || + ((1u << (outgoingCommand -> sendAttempts - 1)) >= peer -> timeoutLimit && + ENET_TIME_DIFFERENCE (host -> serviceTime, peer -> earliestTimeout) >= peer -> timeoutMinimum))) + { + enet_protocol_notify_disconnect (host, peer, event); + + return 1; + } + + ++ peer -> packetsLost; + + outgoingCommand -> roundTripTimeout *= 2; + + if (outgoingCommand -> packet != NULL) + { + peer -> reliableDataInTransit -= outgoingCommand -> fragmentLength; + + enet_list_insert (insertSendReliablePosition, enet_list_remove (& outgoingCommand -> outgoingCommandList)); + } + else + enet_list_insert (insertPosition, enet_list_remove (& outgoingCommand -> outgoingCommandList)); + + if (currentCommand == enet_list_begin (& peer -> sentReliableCommands) && + ! 
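/* A timed-out reliable command doubles its roundTripTimeout (exponential backoff) and is moved
   back onto the outgoing queues for retransmission; the peer is only reported as disconnected once
   the oldest unacknowledged command has been outstanding longer than timeoutMaximum, or longer than
   timeoutMinimum after enough retransmission attempts (governed by timeoutLimit), as checked above. */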
enet_list_empty (& peer -> sentReliableCommands)) + { + outgoingCommand = (ENetOutgoingCommand *) currentCommand; + + peer -> nextTimeout = outgoingCommand -> sentTime + outgoingCommand -> roundTripTimeout; + } + } + + return 0; +} + +static int +enet_protocol_check_outgoing_commands (ENetHost * host, ENetPeer * peer, ENetList * sentUnreliableCommands) +{ + ENetProtocol * command = & host -> commands [host -> commandCount]; + ENetBuffer * buffer = & host -> buffers [host -> bufferCount]; + ENetOutgoingCommand * outgoingCommand; + ENetListIterator currentCommand, currentSendReliableCommand; + ENetChannel *channel = NULL; + enet_uint16 reliableWindow = 0; + size_t commandSize; + int windowWrap = 0, canPing = 1; + + currentCommand = enet_list_begin (& peer -> outgoingCommands); + currentSendReliableCommand = enet_list_begin (& peer -> outgoingSendReliableCommands); + + for (;;) + { + if (currentCommand != enet_list_end (& peer -> outgoingCommands)) + { + outgoingCommand = (ENetOutgoingCommand *) currentCommand; + + if (currentSendReliableCommand != enet_list_end (& peer -> outgoingSendReliableCommands) && + ENET_TIME_LESS (((ENetOutgoingCommand *) currentSendReliableCommand) -> queueTime, outgoingCommand -> queueTime)) + goto useSendReliableCommand; + + currentCommand = enet_list_next (currentCommand); + } + else + if (currentSendReliableCommand != enet_list_end (& peer -> outgoingSendReliableCommands)) + { + useSendReliableCommand: + outgoingCommand = (ENetOutgoingCommand *) currentSendReliableCommand; + currentSendReliableCommand = enet_list_next (currentSendReliableCommand); + } + else + break; + + if (outgoingCommand -> command.header.command & ENET_PROTOCOL_COMMAND_FLAG_ACKNOWLEDGE) + { + channel = outgoingCommand -> command.header.channelID < peer -> channelCount ? & peer -> channels [outgoingCommand -> command.header.channelID] : NULL; + reliableWindow = outgoingCommand -> reliableSequenceNumber / ENET_PEER_RELIABLE_WINDOW_SIZE; + if (channel != NULL) + { + if (windowWrap) + continue; + else + if (outgoingCommand -> sendAttempts < 1 && + ! 
(outgoingCommand -> reliableSequenceNumber % ENET_PEER_RELIABLE_WINDOW_SIZE) && + (channel -> reliableWindows [(reliableWindow + ENET_PEER_RELIABLE_WINDOWS - 1) % ENET_PEER_RELIABLE_WINDOWS] >= ENET_PEER_RELIABLE_WINDOW_SIZE || + channel -> usedReliableWindows & ((((1u << (ENET_PEER_FREE_RELIABLE_WINDOWS + 2)) - 1) << reliableWindow) | + (((1u << (ENET_PEER_FREE_RELIABLE_WINDOWS + 2)) - 1) >> (ENET_PEER_RELIABLE_WINDOWS - reliableWindow))))) + { + windowWrap = 1; + currentSendReliableCommand = enet_list_end (& peer -> outgoingSendReliableCommands); + + continue; + } + } + + if (outgoingCommand -> packet != NULL) + { + enet_uint32 windowSize = (peer -> packetThrottle * peer -> windowSize) / ENET_PEER_PACKET_THROTTLE_SCALE; + + if (peer -> reliableDataInTransit + outgoingCommand -> fragmentLength > ENET_MAX (windowSize, peer -> mtu)) + { + currentSendReliableCommand = enet_list_end (& peer -> outgoingSendReliableCommands); + + continue; + } + } + + canPing = 0; + } + + commandSize = commandSizes [outgoingCommand -> command.header.command & ENET_PROTOCOL_COMMAND_MASK]; + if (command >= & host -> commands [sizeof (host -> commands) / sizeof (ENetProtocol)] || + buffer + 1 >= & host -> buffers [sizeof (host -> buffers) / sizeof (ENetBuffer)] || + peer -> mtu - host -> packetSize < commandSize || + (outgoingCommand -> packet != NULL && + (enet_uint16) (peer -> mtu - host -> packetSize) < (enet_uint16) (commandSize + outgoingCommand -> fragmentLength))) + { + peer -> flags |= ENET_PEER_FLAG_CONTINUE_SENDING; + + break; + } + + if (outgoingCommand -> command.header.command & ENET_PROTOCOL_COMMAND_FLAG_ACKNOWLEDGE) + { + if (channel != NULL && outgoingCommand -> sendAttempts < 1) + { + channel -> usedReliableWindows |= 1u << reliableWindow; + ++ channel -> reliableWindows [reliableWindow]; + } + + ++ outgoingCommand -> sendAttempts; + + if (outgoingCommand -> roundTripTimeout == 0) + outgoingCommand -> roundTripTimeout = peer -> roundTripTime + 4 * peer -> roundTripTimeVariance; + + if (enet_list_empty (& peer -> sentReliableCommands)) + peer -> nextTimeout = host -> serviceTime + outgoingCommand -> roundTripTimeout; + + enet_list_insert (enet_list_end (& peer -> sentReliableCommands), + enet_list_remove (& outgoingCommand -> outgoingCommandList)); + + outgoingCommand -> sentTime = host -> serviceTime; + + host -> headerFlags |= ENET_PROTOCOL_HEADER_FLAG_SENT_TIME; + + peer -> reliableDataInTransit += outgoingCommand -> fragmentLength; + } + else + { + if (outgoingCommand -> packet != NULL && outgoingCommand -> fragmentOffset == 0) + { + peer -> packetThrottleCounter += ENET_PEER_PACKET_THROTTLE_COUNTER; + peer -> packetThrottleCounter %= ENET_PEER_PACKET_THROTTLE_SCALE; + + if (peer -> packetThrottleCounter > peer -> packetThrottle) + { + enet_uint16 reliableSequenceNumber = outgoingCommand -> reliableSequenceNumber, + unreliableSequenceNumber = outgoingCommand -> unreliableSequenceNumber; + for (;;) + { + -- outgoingCommand -> packet -> referenceCount; + + if (outgoingCommand -> packet -> referenceCount == 0) + enet_packet_destroy (outgoingCommand -> packet); + + enet_list_remove (& outgoingCommand -> outgoingCommandList); + enet_free (outgoingCommand); + + if (currentCommand == enet_list_end (& peer -> outgoingCommands)) + break; + + outgoingCommand = (ENetOutgoingCommand *) currentCommand; + if (outgoingCommand -> reliableSequenceNumber != reliableSequenceNumber || + outgoingCommand -> unreliableSequenceNumber != unreliableSequenceNumber) + break; + + currentCommand = enet_list_next 
(currentCommand); + } + + continue; + } + } + + enet_list_remove (& outgoingCommand -> outgoingCommandList); + + if (outgoingCommand -> packet != NULL) + enet_list_insert (enet_list_end (sentUnreliableCommands), outgoingCommand); + } + + buffer -> data = command; + buffer -> dataLength = commandSize; + + host -> packetSize += buffer -> dataLength; + + * command = outgoingCommand -> command; + + if (outgoingCommand -> packet != NULL) + { + ++ buffer; + + buffer -> data = outgoingCommand -> packet -> data + outgoingCommand -> fragmentOffset; + buffer -> dataLength = outgoingCommand -> fragmentLength; + + host -> packetSize += outgoingCommand -> fragmentLength; + } + else + if (! (outgoingCommand -> command.header.command & ENET_PROTOCOL_COMMAND_FLAG_ACKNOWLEDGE)) + enet_free (outgoingCommand); + + ++ peer -> packetsSent; + + ++ command; + ++ buffer; + } + + host -> commandCount = command - host -> commands; + host -> bufferCount = buffer - host -> buffers; + + if (peer -> state == ENET_PEER_STATE_DISCONNECT_LATER && + ! enet_peer_has_outgoing_commands (peer) && + enet_list_empty (sentUnreliableCommands)) + enet_peer_disconnect (peer, peer -> eventData); + + return canPing; +} + +static int +enet_protocol_send_outgoing_commands (ENetHost * host, ENetEvent * event, int checkForTimeouts) +{ + enet_uint8 headerData [sizeof (ENetProtocolHeader) + sizeof (enet_uint32)]; + ENetProtocolHeader * header = (ENetProtocolHeader *) headerData; + int sentLength = 0; + size_t shouldCompress = 0; + ENetList sentUnreliableCommands; + + enet_list_clear (& sentUnreliableCommands); + + for (int sendPass = 0, continueSending = 0; sendPass <= continueSending; ++ sendPass) + for (ENetPeer * currentPeer = host -> peers; + currentPeer < & host -> peers [host -> peerCount]; + ++ currentPeer) + { + if (currentPeer -> state == ENET_PEER_STATE_DISCONNECTED || + currentPeer -> state == ENET_PEER_STATE_ZOMBIE || + (sendPass > 0 && ! (currentPeer -> flags & ENET_PEER_FLAG_CONTINUE_SENDING))) + continue; + + currentPeer -> flags &= ~ ENET_PEER_FLAG_CONTINUE_SENDING; + + host -> headerFlags = 0; + host -> commandCount = 0; + host -> bufferCount = 1; + host -> packetSize = sizeof (ENetProtocolHeader); + + if (! enet_list_empty (& currentPeer -> acknowledgements)) + enet_protocol_send_acknowledgements (host, currentPeer); + + if (checkForTimeouts != 0 && + ! 
enet_list_empty (& currentPeer -> sentReliableCommands) && + ENET_TIME_GREATER_EQUAL (host -> serviceTime, currentPeer -> nextTimeout) && + enet_protocol_check_timeouts (host, currentPeer, event) == 1) + { + if (event != NULL && event -> type != ENET_EVENT_TYPE_NONE) + return 1; + else + goto nextPeer; + } + + if (((enet_list_empty (& currentPeer -> outgoingCommands) && + enet_list_empty (& currentPeer -> outgoingSendReliableCommands)) || + enet_protocol_check_outgoing_commands (host, currentPeer, & sentUnreliableCommands)) && + enet_list_empty (& currentPeer -> sentReliableCommands) && + ENET_TIME_DIFFERENCE (host -> serviceTime, currentPeer -> lastReceiveTime) >= currentPeer -> pingInterval && + currentPeer -> mtu - host -> packetSize >= sizeof (ENetProtocolPing)) + { + enet_peer_ping (currentPeer); + enet_protocol_check_outgoing_commands (host, currentPeer, & sentUnreliableCommands); + } + + if (host -> commandCount == 0) + goto nextPeer; + + if (currentPeer -> packetLossEpoch == 0) + currentPeer -> packetLossEpoch = host -> serviceTime; + else + if (ENET_TIME_DIFFERENCE (host -> serviceTime, currentPeer -> packetLossEpoch) >= ENET_PEER_PACKET_LOSS_INTERVAL && + currentPeer -> packetsSent > 0) + { + enet_uint32 packetLoss = currentPeer -> packetsLost * ENET_PEER_PACKET_LOSS_SCALE / currentPeer -> packetsSent; + +#ifdef ENET_DEBUG + printf ("peer %u: %f%%+-%f%% packet loss, %u+-%u ms round trip time, %f%% throttle, %u outgoing, %u/%u incoming\n", currentPeer -> incomingPeerID, currentPeer -> packetLoss / (float) ENET_PEER_PACKET_LOSS_SCALE, currentPeer -> packetLossVariance / (float) ENET_PEER_PACKET_LOSS_SCALE, currentPeer -> roundTripTime, currentPeer -> roundTripTimeVariance, currentPeer -> packetThrottle / (float) ENET_PEER_PACKET_THROTTLE_SCALE, enet_list_size (& currentPeer -> outgoingCommands) + enet_list_size (& currentPeer -> outgoingSendReliableCommands), currentPeer -> channels != NULL ? enet_list_size (& currentPeer -> channels -> incomingReliableCommands) : 0, currentPeer -> channels != NULL ? 
enet_list_size (& currentPeer -> channels -> incomingUnreliableCommands) : 0); +#endif + + currentPeer -> packetLossVariance = (currentPeer -> packetLossVariance * 3 + ENET_DIFFERENCE (packetLoss, currentPeer -> packetLoss)) / 4; + currentPeer -> packetLoss = (currentPeer -> packetLoss * 7 + packetLoss) / 8; + + currentPeer -> packetLossEpoch = host -> serviceTime; + currentPeer -> packetsSent = 0; + currentPeer -> packetsLost = 0; + } + + host -> buffers -> data = headerData; + if (host -> headerFlags & ENET_PROTOCOL_HEADER_FLAG_SENT_TIME) + { + header -> sentTime = ENET_HOST_TO_NET_16 (host -> serviceTime & 0xFFFF); + + host -> buffers -> dataLength = sizeof (ENetProtocolHeader); + } + else + host -> buffers -> dataLength = (size_t) & ((ENetProtocolHeader *) 0) -> sentTime; + + shouldCompress = 0; + if (host -> compressor.context != NULL && host -> compressor.compress != NULL) + { + size_t originalSize = host -> packetSize - sizeof(ENetProtocolHeader), + compressedSize = host -> compressor.compress (host -> compressor.context, + & host -> buffers [1], host -> bufferCount - 1, + originalSize, + host -> packetData [1], + originalSize); + if (compressedSize > 0 && compressedSize < originalSize) + { + host -> headerFlags |= ENET_PROTOCOL_HEADER_FLAG_COMPRESSED; + shouldCompress = compressedSize; +#ifdef ENET_DEBUG_COMPRESS + printf ("peer %u: compressed %u -> %u (%u%%)\n", currentPeer -> incomingPeerID, originalSize, compressedSize, (compressedSize * 100) / originalSize); +#endif + } + } + + if (currentPeer -> outgoingPeerID < ENET_PROTOCOL_MAXIMUM_PEER_ID) + host -> headerFlags |= currentPeer -> outgoingSessionID << ENET_PROTOCOL_HEADER_SESSION_SHIFT; + header -> peerID = ENET_HOST_TO_NET_16 (currentPeer -> outgoingPeerID | host -> headerFlags); + if (host -> checksum != NULL) + { + enet_uint32 * checksum = (enet_uint32 *) & headerData [host -> buffers -> dataLength]; + enet_uint32 newChecksum = currentPeer -> outgoingPeerID < ENET_PROTOCOL_MAXIMUM_PEER_ID ? currentPeer -> connectID : 0; + /* Checksum may be unaligned, use memcpy to avoid undefined behaviour. */ + memcpy(checksum, & newChecksum, sizeof (enet_uint32)); + host -> buffers -> dataLength += sizeof (enet_uint32); + newChecksum = host -> checksum (host -> buffers, host -> bufferCount); + memcpy(checksum, & newChecksum, sizeof (enet_uint32)); + } + + if (shouldCompress > 0) + { + host -> buffers [1].data = host -> packetData [1]; + host -> buffers [1].dataLength = shouldCompress; + host -> bufferCount = 2; + } + + currentPeer -> lastSendTime = host -> serviceTime; + + sentLength = enet_socket_send (host -> socket, & currentPeer -> address, host -> buffers, host -> bufferCount); + + enet_protocol_remove_sent_unreliable_commands (currentPeer, & sentUnreliableCommands); + + if (sentLength < 0) + return -1; + + host -> totalSentData += sentLength; + host -> totalSentPackets ++; + + nextPeer: + if (currentPeer -> flags & ENET_PEER_FLAG_CONTINUE_SENDING) + continueSending = sendPass + 1; + } + + return 0; +} + +/** Sends any queued packets on the host specified to its designated peers. + + @param host host to flush + @remarks this function need only be used in circumstances where one wishes to send queued packets earlier than in a call to enet_host_service(). + @ingroup host +*/ +void +enet_host_flush (ENetHost * host) +{ + host -> serviceTime = enet_time_get (); + + enet_protocol_send_outgoing_commands (host, NULL, 0); +} + +/** Checks for any queued events on the host and dispatches one if available. 
+ + @param host host to check for events + @param event an event structure where event details will be placed if available + @retval > 0 if an event was dispatched + @retval 0 if no events are available + @retval < 0 on failure + @ingroup host +*/ +int +enet_host_check_events (ENetHost * host, ENetEvent * event) +{ + if (event == NULL) return -1; + + event -> type = ENET_EVENT_TYPE_NONE; + event -> peer = NULL; + event -> packet = NULL; + + return enet_protocol_dispatch_incoming_commands (host, event); +} + +/** Waits for events on the host specified and shuttles packets between + the host and its peers. + + @param host host to service + @param event an event structure where event details will be placed if one occurs + if event == NULL then no events will be delivered + @param timeout number of milliseconds that ENet should wait for events + @retval > 0 if an event occurred within the specified time limit + @retval 0 if no event occurred + @retval < 0 on failure + @remarks enet_host_service should be called fairly regularly for adequate performance + @ingroup host +*/ +int +enet_host_service (ENetHost * host, ENetEvent * event, enet_uint32 timeout) +{ + enet_uint32 waitCondition; + + if (event != NULL) + { + event -> type = ENET_EVENT_TYPE_NONE; + event -> peer = NULL; + event -> packet = NULL; + + switch (enet_protocol_dispatch_incoming_commands (host, event)) + { + case 1: + return 1; + + case -1: +#ifdef ENET_DEBUG + perror ("Error dispatching incoming packets"); +#endif + + return -1; + + default: + break; + } + } + + host -> serviceTime = enet_time_get (); + + timeout += host -> serviceTime; + + do + { + if (ENET_TIME_DIFFERENCE (host -> serviceTime, host -> bandwidthThrottleEpoch) >= ENET_HOST_BANDWIDTH_THROTTLE_INTERVAL) + enet_host_bandwidth_throttle (host); + + switch (enet_protocol_send_outgoing_commands (host, event, 1)) + { + case 1: + return 1; + + case -1: +#ifdef ENET_DEBUG + perror ("Error sending outgoing packets"); +#endif + + return -1; + + default: + break; + } + + switch (enet_protocol_receive_incoming_commands (host, event)) + { + case 1: + return 1; + + case -1: +#ifdef ENET_DEBUG + perror ("Error receiving incoming packets"); +#endif + + return -1; + + default: + break; + } + + switch (enet_protocol_send_outgoing_commands (host, event, 1)) + { + case 1: + return 1; + + case -1: +#ifdef ENET_DEBUG + perror ("Error sending outgoing packets"); +#endif + + return -1; + + default: + break; + } + + if (event != NULL) + { + switch (enet_protocol_dispatch_incoming_commands (host, event)) + { + case 1: + return 1; + + case -1: +#ifdef ENET_DEBUG + perror ("Error dispatching incoming packets"); +#endif + + return -1; + + default: + break; + } + } + + if (ENET_TIME_GREATER_EQUAL (host -> serviceTime, timeout)) + return 0; + + do + { + host -> serviceTime = enet_time_get (); + + if (ENET_TIME_GREATER_EQUAL (host -> serviceTime, timeout)) + return 0; + + waitCondition = ENET_SOCKET_WAIT_RECEIVE | ENET_SOCKET_WAIT_INTERRUPT; + + if (enet_socket_wait (host -> socket, & waitCondition, ENET_TIME_DIFFERENCE (timeout, host -> serviceTime)) != 0) + return -1; + } + while (waitCondition & ENET_SOCKET_WAIT_INTERRUPT); + + host -> serviceTime = enet_time_get (); + } while (waitCondition & ENET_SOCKET_WAIT_RECEIVE); + + return 0; +} + diff --git a/pkg/enet/enet/unix.c b/pkg/enet/enet/unix.c new file mode 100644 index 000000000..a66bc33da --- /dev/null +++ b/pkg/enet/enet/unix.c @@ -0,0 +1,630 @@ +/** + @file unix.c + @brief ENet Unix system specific functions +*/ +#ifndef _WIN32 + 
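As a point of reference for the host API documented above (enet_host_service, enet_host_check_events and enet_host_flush), a caller typically drives ENet with a service loop along the following lines. This is a usage sketch only, not part of the vendored sources or of this change: the function name run_host, the 1000 ms timeout, the printf logging, and the assumption that the host was created elsewhere with enet_host_create are all illustrative choices.

#include <enet/enet.h>
#include <stdio.h>

/* Minimal service loop: enet_host_service() pumps the protocol and returns at
   most one event per call; > 0 means an event was placed in `event`, 0 means
   the timeout elapsed with no event, < 0 indicates failure. */
static void run_host (ENetHost * host)
{
    ENetEvent event;

    while (enet_host_service (host, & event, 1000) > 0)
    {
        switch (event.type)
        {
        case ENET_EVENT_TYPE_CONNECT:
            printf ("peer connected\n");
            break;

        case ENET_EVENT_TYPE_RECEIVE:
            printf ("received %zu bytes on channel %u\n",
                    (size_t) event.packet -> dataLength,
                    (unsigned) event.channelID);
            /* Received packets are owned by the caller and must be destroyed. */
            enet_packet_destroy (event.packet);
            break;

        case ENET_EVENT_TYPE_DISCONNECT:
            printf ("peer disconnected\n");
            break;

        default:
            break;
        }
    }

    /* enet_host_flush() could be called here to push any still-queued packets
       without waiting for another enet_host_service() call. */
}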
+#include <sys/types.h> +#include <sys/socket.h> +#include <sys/ioctl.h> +#include <sys/time.h> +#include <arpa/inet.h> +#include <netinet/in.h> +#include <netinet/tcp.h> +#include <netdb.h> +#include <unistd.h> +#include <string.h> +#include <errno.h> +#include <time.h> + +#define ENET_BUILDING_LIB 1 +#include "enet/enet.h" + +#ifdef __APPLE__ +#ifdef HAS_POLL +#undef HAS_POLL +#endif +#ifndef HAS_FCNTL +#define HAS_FCNTL 1 +#endif +#ifndef HAS_INET_PTON +#define HAS_INET_PTON 1 +#endif +#ifndef HAS_INET_NTOP +#define HAS_INET_NTOP 1 +#endif +#ifndef HAS_MSGHDR_FLAGS +#define HAS_MSGHDR_FLAGS 1 +#endif +#ifndef HAS_SOCKLEN_T +#define HAS_SOCKLEN_T 1 +#endif +#ifndef HAS_GETADDRINFO +#define HAS_GETADDRINFO 1 +#endif +#ifndef HAS_GETNAMEINFO +#define HAS_GETNAMEINFO 1 +#endif +#endif + +#ifdef HAS_FCNTL +#include <fcntl.h> +#endif + +#ifdef HAS_POLL +#include <poll.h> +#endif + +#if !defined(HAS_SOCKLEN_T) && !defined(__socklen_t_defined) +typedef int socklen_t; +#endif + +#ifndef MSG_NOSIGNAL +#define MSG_NOSIGNAL 0 +#endif + +static enet_uint32 timeBase = 0; + +int +enet_initialize (void) +{ + return 0; +} + +void +enet_deinitialize (void) +{ +} + +enet_uint32 +enet_host_random_seed (void) +{ + return (enet_uint32) time (NULL); +} + +enet_uint32 +enet_time_get (void) +{ + struct timeval timeVal; + + gettimeofday (& timeVal, NULL); + + return timeVal.tv_sec * 1000 + timeVal.tv_usec / 1000 - timeBase; +} + +void +enet_time_set (enet_uint32 newTimeBase) +{ + struct timeval timeVal; + + gettimeofday (& timeVal, NULL); + + timeBase = timeVal.tv_sec * 1000 + timeVal.tv_usec / 1000 - newTimeBase; +} + +int +enet_address_set_host_ip (ENetAddress * address, const char * name) +{ +#ifdef HAS_INET_PTON + if (! inet_pton (AF_INET, name, & address -> host)) +#else + if (! inet_aton (name, (struct in_addr *) & address -> host)) +#endif + return -1; + + return 0; +} + +int +enet_address_set_host (ENetAddress * address, const char * name) +{ +#ifdef HAS_GETADDRINFO + struct addrinfo hints, * resultList = NULL, * result = NULL; + + memset (& hints, 0, sizeof (hints)); + hints.ai_family = AF_INET; + + if (getaddrinfo (name, NULL, NULL, & resultList) != 0) + return -1; + + for (result = resultList; result != NULL; result = result -> ai_next) + { + if (result -> ai_family == AF_INET && result -> ai_addr != NULL && result -> ai_addrlen >= sizeof (struct sockaddr_in)) + { + struct sockaddr_in * sin = (struct sockaddr_in *) result -> ai_addr; + + address -> host = sin -> sin_addr.s_addr; + + freeaddrinfo (resultList); + + return 0; + } + } + + if (resultList != NULL) + freeaddrinfo (resultList); +#else + struct hostent * hostEntry = NULL; +#ifdef HAS_GETHOSTBYNAME_R + struct hostent hostData; + char buffer [2048]; + int errnum; + +#if defined(linux) || defined(__linux) || defined(__linux__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__) || defined(__GNU__) + gethostbyname_r (name, & hostData, buffer, sizeof (buffer), & hostEntry, & errnum); +#else + hostEntry = gethostbyname_r (name, & hostData, buffer, sizeof (buffer), & errnum); +#endif +#else + hostEntry = gethostbyname (name); +#endif + + if (hostEntry != NULL && hostEntry -> h_addrtype == AF_INET) + { + address -> host = * (enet_uint32 *) hostEntry -> h_addr_list [0]; + + return 0; + } +#endif + + return enet_address_set_host_ip (address, name); +} + +int +enet_address_get_host_ip (const ENetAddress * address, char * name, size_t nameLength) +{ +#ifdef HAS_INET_NTOP + if (inet_ntop (AF_INET, & address -> host, name, nameLength) == NULL) +#else + char * addr = inet_ntoa (* (struct in_addr *) & address -> host); + if (addr != NULL) + { + size_t addrLen = strlen(addr); + if (addrLen >= nameLength) + return -1; +
memcpy (name, addr, addrLen + 1); + } + else +#endif + return -1; + return 0; +} + +int +enet_address_get_host (const ENetAddress * address, char * name, size_t nameLength) +{ +#ifdef HAS_GETNAMEINFO + struct sockaddr_in sin; + int err; + + memset (& sin, 0, sizeof (struct sockaddr_in)); + + sin.sin_family = AF_INET; + sin.sin_port = ENET_HOST_TO_NET_16 (address -> port); + sin.sin_addr.s_addr = address -> host; + + err = getnameinfo ((struct sockaddr *) & sin, sizeof (sin), name, nameLength, NULL, 0, NI_NAMEREQD); + if (! err) + { + if (name != NULL && nameLength > 0 && ! memchr (name, '\0', nameLength)) + return -1; + return 0; + } + if (err != EAI_NONAME) + return -1; +#else + struct in_addr in; + struct hostent * hostEntry = NULL; +#ifdef HAS_GETHOSTBYADDR_R + struct hostent hostData; + char buffer [2048]; + int errnum; + + in.s_addr = address -> host; + +#if defined(linux) || defined(__linux) || defined(__linux__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__) || defined(__GNU__) + gethostbyaddr_r ((char *) & in, sizeof (struct in_addr), AF_INET, & hostData, buffer, sizeof (buffer), & hostEntry, & errnum); +#else + hostEntry = gethostbyaddr_r ((char *) & in, sizeof (struct in_addr), AF_INET, & hostData, buffer, sizeof (buffer), & errnum); +#endif +#else + in.s_addr = address -> host; + + hostEntry = gethostbyaddr ((char *) & in, sizeof (struct in_addr), AF_INET); +#endif + + if (hostEntry != NULL) + { + size_t hostLen = strlen (hostEntry -> h_name); + if (hostLen >= nameLength) + return -1; + memcpy (name, hostEntry -> h_name, hostLen + 1); + return 0; + } +#endif + + return enet_address_get_host_ip (address, name, nameLength); +} + +int +enet_socket_bind (ENetSocket socket, const ENetAddress * address) +{ + struct sockaddr_in sin; + + memset (& sin, 0, sizeof (struct sockaddr_in)); + + sin.sin_family = AF_INET; + + if (address != NULL) + { + sin.sin_port = ENET_HOST_TO_NET_16 (address -> port); + sin.sin_addr.s_addr = address -> host; + } + else + { + sin.sin_port = 0; + sin.sin_addr.s_addr = INADDR_ANY; + } + + return bind (socket, + (struct sockaddr *) & sin, + sizeof (struct sockaddr_in)); +} + +int +enet_socket_get_address (ENetSocket socket, ENetAddress * address) +{ + struct sockaddr_in sin; + socklen_t sinLength = sizeof (struct sockaddr_in); + + if (getsockname (socket, (struct sockaddr *) & sin, & sinLength) == -1) + return -1; + + address -> host = (enet_uint32) sin.sin_addr.s_addr; + address -> port = ENET_NET_TO_HOST_16 (sin.sin_port); + + return 0; +} + +int +enet_socket_listen (ENetSocket socket, int backlog) +{ + return listen (socket, backlog < 0 ? SOMAXCONN : backlog); +} + +ENetSocket +enet_socket_create (ENetSocketType type) +{ + return socket (PF_INET, type == ENET_SOCKET_TYPE_DATAGRAM ? SOCK_DGRAM : SOCK_STREAM, 0); +} + +int +enet_socket_set_option (ENetSocket socket, ENetSocketOption option, int value) +{ + int result = -1; + switch (option) + { + case ENET_SOCKOPT_NONBLOCK: +#ifdef HAS_FCNTL + result = fcntl (socket, F_SETFL, (value ? 
O_NONBLOCK : 0) | (fcntl (socket, F_GETFL) & ~O_NONBLOCK)); +#else + result = ioctl (socket, FIONBIO, & value); +#endif + break; + + case ENET_SOCKOPT_BROADCAST: + result = setsockopt (socket, SOL_SOCKET, SO_BROADCAST, (char *) & value, sizeof (int)); + break; + + case ENET_SOCKOPT_REUSEADDR: + result = setsockopt (socket, SOL_SOCKET, SO_REUSEADDR, (char *) & value, sizeof (int)); + break; + + case ENET_SOCKOPT_RCVBUF: + result = setsockopt (socket, SOL_SOCKET, SO_RCVBUF, (char *) & value, sizeof (int)); + break; + + case ENET_SOCKOPT_SNDBUF: + result = setsockopt (socket, SOL_SOCKET, SO_SNDBUF, (char *) & value, sizeof (int)); + break; + + case ENET_SOCKOPT_RCVTIMEO: + { + struct timeval timeVal; + timeVal.tv_sec = value / 1000; + timeVal.tv_usec = (value % 1000) * 1000; + result = setsockopt (socket, SOL_SOCKET, SO_RCVTIMEO, (char *) & timeVal, sizeof (struct timeval)); + break; + } + + case ENET_SOCKOPT_SNDTIMEO: + { + struct timeval timeVal; + timeVal.tv_sec = value / 1000; + timeVal.tv_usec = (value % 1000) * 1000; + result = setsockopt (socket, SOL_SOCKET, SO_SNDTIMEO, (char *) & timeVal, sizeof (struct timeval)); + break; + } + + case ENET_SOCKOPT_NODELAY: + result = setsockopt (socket, IPPROTO_TCP, TCP_NODELAY, (char *) & value, sizeof (int)); + break; + + case ENET_SOCKOPT_TTL: + result = setsockopt (socket, IPPROTO_IP, IP_TTL, (char *) & value, sizeof (int)); + break; + + default: + break; + } + return result == -1 ? -1 : 0; +} + +int +enet_socket_get_option (ENetSocket socket, ENetSocketOption option, int * value) +{ + int result = -1; + socklen_t len; + switch (option) + { + case ENET_SOCKOPT_ERROR: + len = sizeof (int); + result = getsockopt (socket, SOL_SOCKET, SO_ERROR, value, & len); + break; + + case ENET_SOCKOPT_TTL: + len = sizeof (int); + result = getsockopt (socket, IPPROTO_IP, IP_TTL, (char *) value, & len); + break; + + default: + break; + } + return result == -1 ? -1 : 0; +} + +int +enet_socket_connect (ENetSocket socket, const ENetAddress * address) +{ + struct sockaddr_in sin; + int result; + + memset (& sin, 0, sizeof (struct sockaddr_in)); + + sin.sin_family = AF_INET; + sin.sin_port = ENET_HOST_TO_NET_16 (address -> port); + sin.sin_addr.s_addr = address -> host; + + result = connect (socket, (struct sockaddr *) & sin, sizeof (struct sockaddr_in)); + if (result == -1 && errno == EINPROGRESS) + return 0; + + return result; +} + +ENetSocket +enet_socket_accept (ENetSocket socket, ENetAddress * address) +{ + int result; + struct sockaddr_in sin; + socklen_t sinLength = sizeof (struct sockaddr_in); + + result = accept (socket, + address != NULL ? (struct sockaddr *) & sin : NULL, + address != NULL ? 
& sinLength : NULL); + + if (result == -1) + return ENET_SOCKET_NULL; + + if (address != NULL) + { + address -> host = (enet_uint32) sin.sin_addr.s_addr; + address -> port = ENET_NET_TO_HOST_16 (sin.sin_port); + } + + return result; +} + +int +enet_socket_shutdown (ENetSocket socket, ENetSocketShutdown how) +{ + return shutdown (socket, (int) how); +} + +void +enet_socket_destroy (ENetSocket socket) +{ + if (socket != -1) + close (socket); +} + +int +enet_socket_send (ENetSocket socket, + const ENetAddress * address, + const ENetBuffer * buffers, + size_t bufferCount) +{ + struct msghdr msgHdr; + struct sockaddr_in sin; + int sentLength; + + memset (& msgHdr, 0, sizeof (struct msghdr)); + + if (address != NULL) + { + memset (& sin, 0, sizeof (struct sockaddr_in)); + + sin.sin_family = AF_INET; + sin.sin_port = ENET_HOST_TO_NET_16 (address -> port); + sin.sin_addr.s_addr = address -> host; + + msgHdr.msg_name = & sin; + msgHdr.msg_namelen = sizeof (struct sockaddr_in); + } + + msgHdr.msg_iov = (struct iovec *) buffers; + msgHdr.msg_iovlen = bufferCount; + + sentLength = sendmsg (socket, & msgHdr, MSG_NOSIGNAL); + + if (sentLength == -1) + { + if (errno == EWOULDBLOCK) + return 0; + + return -1; + } + + return sentLength; +} + +int +enet_socket_receive (ENetSocket socket, + ENetAddress * address, + ENetBuffer * buffers, + size_t bufferCount) +{ + struct msghdr msgHdr; + struct sockaddr_in sin; + int recvLength; + + memset (& msgHdr, 0, sizeof (struct msghdr)); + + if (address != NULL) + { + msgHdr.msg_name = & sin; + msgHdr.msg_namelen = sizeof (struct sockaddr_in); + } + + msgHdr.msg_iov = (struct iovec *) buffers; + msgHdr.msg_iovlen = bufferCount; + + recvLength = recvmsg (socket, & msgHdr, MSG_NOSIGNAL); + + if (recvLength == -1) + { + switch (errno) + { + case EWOULDBLOCK: + return 0; + case EINTR: + case EMSGSIZE: + return -2; + default: + return -1; + } + } + +#ifdef HAS_MSGHDR_FLAGS + if (msgHdr.msg_flags & MSG_TRUNC) + return -2; +#endif + + if (address != NULL) + { + address -> host = (enet_uint32) sin.sin_addr.s_addr; + address -> port = ENET_NET_TO_HOST_16 (sin.sin_port); + } + + return recvLength; +} + +int +enet_socketset_select (ENetSocket maxSocket, ENetSocketSet * readSet, ENetSocketSet * writeSet, enet_uint32 timeout) +{ + struct timeval timeVal; + + timeVal.tv_sec = timeout / 1000; + timeVal.tv_usec = (timeout % 1000) * 1000; + + return select (maxSocket + 1, readSet, writeSet, NULL, & timeVal); +} + +int +enet_socket_wait (ENetSocket socket, enet_uint32 * condition, enet_uint32 timeout) +{ +#ifdef HAS_POLL + struct pollfd pollSocket; + int pollCount; + + pollSocket.fd = socket; + pollSocket.events = 0; + + if (* condition & ENET_SOCKET_WAIT_SEND) + pollSocket.events |= POLLOUT; + + if (* condition & ENET_SOCKET_WAIT_RECEIVE) + pollSocket.events |= POLLIN; + + pollCount = poll (& pollSocket, 1, timeout); + + if (pollCount < 0) + { + if (errno == EINTR && * condition & ENET_SOCKET_WAIT_INTERRUPT) + { + * condition = ENET_SOCKET_WAIT_INTERRUPT; + + return 0; + } + + return -1; + } + + * condition = ENET_SOCKET_WAIT_NONE; + + if (pollCount == 0) + return 0; + + if (pollSocket.revents & POLLOUT) + * condition |= ENET_SOCKET_WAIT_SEND; + + if (pollSocket.revents & POLLIN) + * condition |= ENET_SOCKET_WAIT_RECEIVE; + + return 0; +#else + fd_set readSet, writeSet; + struct timeval timeVal; + int selectCount; + + timeVal.tv_sec = timeout / 1000; + timeVal.tv_usec = (timeout % 1000) * 1000; + + FD_ZERO (& readSet); + FD_ZERO (& writeSet); + + if (* condition & 
ENET_SOCKET_WAIT_SEND) + FD_SET (socket, & writeSet); + + if (* condition & ENET_SOCKET_WAIT_RECEIVE) + FD_SET (socket, & readSet); + + selectCount = select (socket + 1, & readSet, & writeSet, NULL, & timeVal); + + if (selectCount < 0) + { + if (errno == EINTR && * condition & ENET_SOCKET_WAIT_INTERRUPT) + { + * condition = ENET_SOCKET_WAIT_INTERRUPT; + + return 0; + } + + return -1; + } + + * condition = ENET_SOCKET_WAIT_NONE; + + if (selectCount == 0) + return 0; + + if (FD_ISSET (socket, & writeSet)) + * condition |= ENET_SOCKET_WAIT_SEND; + + if (FD_ISSET (socket, & readSet)) + * condition |= ENET_SOCKET_WAIT_RECEIVE; + + return 0; +#endif +} + +#endif + diff --git a/pkg/enet/enet/win32.c b/pkg/enet/enet/win32.c new file mode 100644 index 000000000..578a3227b --- /dev/null +++ b/pkg/enet/enet/win32.c @@ -0,0 +1,455 @@ +/** + @file win32.c + @brief ENet Win32 system specific functions +*/ +#ifdef _WIN32 + +#define ENET_BUILDING_LIB 1 +#include "enet/enet.h" +#include +#include +#include + +static enet_uint32 timeBase = 0; + +int +enet_initialize (void) +{ + WORD versionRequested = MAKEWORD (1, 1); + WSADATA wsaData; + + if (WSAStartup (versionRequested, & wsaData)) + return -1; + + if (LOBYTE (wsaData.wVersion) != 1|| + HIBYTE (wsaData.wVersion) != 1) + { + WSACleanup (); + + return -1; + } + + timeBeginPeriod (1); + + return 0; +} + +void +enet_deinitialize (void) +{ + timeEndPeriod (1); + + WSACleanup (); +} + +enet_uint32 +enet_host_random_seed (void) +{ + return (enet_uint32) timeGetTime (); +} + +enet_uint32 +enet_time_get (void) +{ + return (enet_uint32) timeGetTime () - timeBase; +} + +void +enet_time_set (enet_uint32 newTimeBase) +{ + timeBase = (enet_uint32) timeGetTime () - newTimeBase; +} + +int +enet_address_set_host_ip (ENetAddress * address, const char * name) +{ + enet_uint8 vals [4] = { 0, 0, 0, 0 }; + int i; + + for (i = 0; i < 4; ++ i) + { + const char * next = name + 1; + if (* name != '0') + { + long val = strtol (name, (char **) & next, 10); + if (val < 0 || val > 255 || next == name || next - name > 3) + return -1; + vals [i] = (enet_uint8) val; + } + + if (* next != (i < 3 ? '.' 
: '\0')) + return -1; + name = next + 1; + } + + memcpy (& address -> host, vals, sizeof (enet_uint32)); + return 0; +} + +int +enet_address_set_host (ENetAddress * address, const char * name) +{ + struct hostent * hostEntry; + + hostEntry = gethostbyname (name); + if (hostEntry == NULL || + hostEntry -> h_addrtype != AF_INET) + return enet_address_set_host_ip (address, name); + + address -> host = * (enet_uint32 *) hostEntry -> h_addr_list [0]; + + return 0; +} + +int +enet_address_get_host_ip (const ENetAddress * address, char * name, size_t nameLength) +{ + char * addr = inet_ntoa (* (struct in_addr *) & address -> host); + if (addr == NULL) + return -1; + else + { + size_t addrLen = strlen(addr); + if (addrLen >= nameLength) + return -1; + memcpy (name, addr, addrLen + 1); + } + return 0; +} + +int +enet_address_get_host (const ENetAddress * address, char * name, size_t nameLength) +{ + struct in_addr in; + struct hostent * hostEntry; + + in.s_addr = address -> host; + + hostEntry = gethostbyaddr ((char *) & in, sizeof (struct in_addr), AF_INET); + if (hostEntry == NULL) + return enet_address_get_host_ip (address, name, nameLength); + else + { + size_t hostLen = strlen (hostEntry -> h_name); + if (hostLen >= nameLength) + return -1; + memcpy (name, hostEntry -> h_name, hostLen + 1); + } + + return 0; +} + +int +enet_socket_bind (ENetSocket socket, const ENetAddress * address) +{ + struct sockaddr_in sin; + + memset (& sin, 0, sizeof (struct sockaddr_in)); + + sin.sin_family = AF_INET; + + if (address != NULL) + { + sin.sin_port = ENET_HOST_TO_NET_16 (address -> port); + sin.sin_addr.s_addr = address -> host; + } + else + { + sin.sin_port = 0; + sin.sin_addr.s_addr = INADDR_ANY; + } + + return bind (socket, + (struct sockaddr *) & sin, + sizeof (struct sockaddr_in)) == SOCKET_ERROR ? -1 : 0; +} + +int +enet_socket_get_address (ENetSocket socket, ENetAddress * address) +{ + struct sockaddr_in sin; + int sinLength = sizeof (struct sockaddr_in); + + if (getsockname (socket, (struct sockaddr *) & sin, & sinLength) == -1) + return -1; + + address -> host = (enet_uint32) sin.sin_addr.s_addr; + address -> port = ENET_NET_TO_HOST_16 (sin.sin_port); + + return 0; +} + +int +enet_socket_listen (ENetSocket socket, int backlog) +{ + return listen (socket, backlog < 0 ? SOMAXCONN : backlog) == SOCKET_ERROR ? -1 : 0; +} + +ENetSocket +enet_socket_create (ENetSocketType type) +{ + return socket (PF_INET, type == ENET_SOCKET_TYPE_DATAGRAM ? 
SOCK_DGRAM : SOCK_STREAM, 0); +} + +int +enet_socket_set_option (ENetSocket socket, ENetSocketOption option, int value) +{ + int result = SOCKET_ERROR; + switch (option) + { + case ENET_SOCKOPT_NONBLOCK: + { + u_long nonBlocking = (u_long) value; + result = ioctlsocket (socket, FIONBIO, & nonBlocking); + break; + } + + case ENET_SOCKOPT_BROADCAST: + result = setsockopt (socket, SOL_SOCKET, SO_BROADCAST, (char *) & value, sizeof (int)); + break; + + case ENET_SOCKOPT_REUSEADDR: + result = setsockopt (socket, SOL_SOCKET, SO_REUSEADDR, (char *) & value, sizeof (int)); + break; + + case ENET_SOCKOPT_RCVBUF: + result = setsockopt (socket, SOL_SOCKET, SO_RCVBUF, (char *) & value, sizeof (int)); + break; + + case ENET_SOCKOPT_SNDBUF: + result = setsockopt (socket, SOL_SOCKET, SO_SNDBUF, (char *) & value, sizeof (int)); + break; + + case ENET_SOCKOPT_RCVTIMEO: + result = setsockopt (socket, SOL_SOCKET, SO_RCVTIMEO, (char *) & value, sizeof (int)); + break; + + case ENET_SOCKOPT_SNDTIMEO: + result = setsockopt (socket, SOL_SOCKET, SO_SNDTIMEO, (char *) & value, sizeof (int)); + break; + + case ENET_SOCKOPT_NODELAY: + result = setsockopt (socket, IPPROTO_TCP, TCP_NODELAY, (char *) & value, sizeof (int)); + break; + + case ENET_SOCKOPT_TTL: + result = setsockopt (socket, IPPROTO_IP, IP_TTL, (char *) & value, sizeof (int)); + break; + + default: + break; + } + return result == SOCKET_ERROR ? -1 : 0; +} + +int +enet_socket_get_option (ENetSocket socket, ENetSocketOption option, int * value) +{ + int result = SOCKET_ERROR, len; + switch (option) + { + case ENET_SOCKOPT_ERROR: + len = sizeof(int); + result = getsockopt (socket, SOL_SOCKET, SO_ERROR, (char *) value, & len); + break; + + case ENET_SOCKOPT_TTL: + len = sizeof(int); + result = getsockopt (socket, IPPROTO_IP, IP_TTL, (char *) value, & len); + break; + + default: + break; + } + return result == SOCKET_ERROR ? -1 : 0; +} + +int +enet_socket_connect (ENetSocket socket, const ENetAddress * address) +{ + struct sockaddr_in sin; + int result; + + memset (& sin, 0, sizeof (struct sockaddr_in)); + + sin.sin_family = AF_INET; + sin.sin_port = ENET_HOST_TO_NET_16 (address -> port); + sin.sin_addr.s_addr = address -> host; + + result = connect (socket, (struct sockaddr *) & sin, sizeof (struct sockaddr_in)); + if (result == SOCKET_ERROR && WSAGetLastError () != WSAEWOULDBLOCK) + return -1; + + return 0; +} + +ENetSocket +enet_socket_accept (ENetSocket socket, ENetAddress * address) +{ + SOCKET result; + struct sockaddr_in sin; + int sinLength = sizeof (struct sockaddr_in); + + result = accept (socket, + address != NULL ? (struct sockaddr *) & sin : NULL, + address != NULL ? & sinLength : NULL); + + if (result == INVALID_SOCKET) + return ENET_SOCKET_NULL; + + if (address != NULL) + { + address -> host = (enet_uint32) sin.sin_addr.s_addr; + address -> port = ENET_NET_TO_HOST_16 (sin.sin_port); + } + + return result; +} + +int +enet_socket_shutdown (ENetSocket socket, ENetSocketShutdown how) +{ + return shutdown (socket, (int) how) == SOCKET_ERROR ? 
-1 : 0; +} + +void +enet_socket_destroy (ENetSocket socket) +{ + if (socket != INVALID_SOCKET) + closesocket (socket); +} + +int +enet_socket_send (ENetSocket socket, + const ENetAddress * address, + const ENetBuffer * buffers, + size_t bufferCount) +{ + struct sockaddr_in sin; + DWORD sentLength = 0; + + if (address != NULL) + { + memset (& sin, 0, sizeof (struct sockaddr_in)); + + sin.sin_family = AF_INET; + sin.sin_port = ENET_HOST_TO_NET_16 (address -> port); + sin.sin_addr.s_addr = address -> host; + } + + if (WSASendTo (socket, + (LPWSABUF) buffers, + (DWORD) bufferCount, + & sentLength, + 0, + address != NULL ? (struct sockaddr *) & sin : NULL, + address != NULL ? sizeof (struct sockaddr_in) : 0, + NULL, + NULL) == SOCKET_ERROR) + { + if (WSAGetLastError () == WSAEWOULDBLOCK) + return 0; + + return -1; + } + + return (int) sentLength; +} + +int +enet_socket_receive (ENetSocket socket, + ENetAddress * address, + ENetBuffer * buffers, + size_t bufferCount) +{ + INT sinLength = sizeof (struct sockaddr_in); + DWORD flags = 0, + recvLength = 0; + struct sockaddr_in sin; + + if (WSARecvFrom (socket, + (LPWSABUF) buffers, + (DWORD) bufferCount, + & recvLength, + & flags, + address != NULL ? (struct sockaddr *) & sin : NULL, + address != NULL ? & sinLength : NULL, + NULL, + NULL) == SOCKET_ERROR) + { + switch (WSAGetLastError ()) + { + case WSAEWOULDBLOCK: + case WSAECONNRESET: + return 0; + case WSAEINTR: + case WSAEMSGSIZE: + return -2; + default: + return -1; + } + } + + if (flags & MSG_PARTIAL) + return -2; + + if (address != NULL) + { + address -> host = (enet_uint32) sin.sin_addr.s_addr; + address -> port = ENET_NET_TO_HOST_16 (sin.sin_port); + } + + return (int) recvLength; +} + +int +enet_socketset_select (ENetSocket maxSocket, ENetSocketSet * readSet, ENetSocketSet * writeSet, enet_uint32 timeout) +{ + struct timeval timeVal; + + timeVal.tv_sec = timeout / 1000; + timeVal.tv_usec = (timeout % 1000) * 1000; + + return select (maxSocket + 1, readSet, writeSet, NULL, & timeVal); +} + +int +enet_socket_wait (ENetSocket socket, enet_uint32 * condition, enet_uint32 timeout) +{ + fd_set readSet, writeSet; + struct timeval timeVal; + int selectCount; + + timeVal.tv_sec = timeout / 1000; + timeVal.tv_usec = (timeout % 1000) * 1000; + + FD_ZERO (& readSet); + FD_ZERO (& writeSet); + + if (* condition & ENET_SOCKET_WAIT_SEND) + FD_SET (socket, & writeSet); + + if (* condition & ENET_SOCKET_WAIT_RECEIVE) + FD_SET (socket, & readSet); + + selectCount = select (socket + 1, & readSet, & writeSet, NULL, & timeVal); + + if (selectCount < 0) + return -1; + + * condition = ENET_SOCKET_WAIT_NONE; + + if (selectCount == 0) + return 0; + + if (FD_ISSET (socket, & writeSet)) + * condition |= ENET_SOCKET_WAIT_SEND; + + if (FD_ISSET (socket, & readSet)) + * condition |= ENET_SOCKET_WAIT_RECEIVE; + + return 0; +} + +#endif + diff --git a/services/go/pkg/enet/event.go b/pkg/enet/event.go similarity index 92% rename from services/go/pkg/enet/event.go rename to pkg/enet/event.go index d2f2b8808..32fea44e5 100644 --- a/services/go/pkg/enet/event.go +++ b/pkg/enet/event.go @@ -1,7 +1,8 @@ package enet /* -#cgo LDFLAGS: -lenet +#cgo LDFLAGS: -L./enet -lenet +#cgo CFLAGS: -I./enet/include #include */ import "C" diff --git a/services/go/pkg/enet/host.go b/pkg/enet/host.go similarity index 95% rename from services/go/pkg/enet/host.go rename to pkg/enet/host.go index b23900f38..6bec87643 100644 --- a/services/go/pkg/enet/host.go +++ b/pkg/enet/host.go @@ -1,10 +1,11 @@ package enet /* -#cgo LDFLAGS: -lenet +#cgo 
LDFLAGS: -L./enet -lenet +#cgo CFLAGS: -I./enet/include +#include #include #include -#include ENetHost* initClient(const char *addr, int port) { if (enet_initialize() != 0) { @@ -74,8 +75,8 @@ void cleanupHost(ENetHost* host) { enet_uint16 getLastCommand(ENetPeer *peer) { ENetOutgoingCommand * outgoingCommand = NULL; ENetListIterator currentCommand; - for (currentCommand = enet_list_begin(&peer->outgoingReliableCommands); - currentCommand != enet_list_end(&peer->outgoingReliableCommands); + for (currentCommand = enet_list_begin(&peer->outgoingSendReliableCommands); + currentCommand != enet_list_end(&peer->outgoingSendReliableCommands); currentCommand = enet_list_next(currentCommand)) { outgoingCommand = (ENetOutgoingCommand *) currentCommand; diff --git a/services/go/pkg/enet/packet.go b/pkg/enet/packet.go similarity index 91% rename from services/go/pkg/enet/packet.go rename to pkg/enet/packet.go index 7b5d40d68..c46e1815e 100644 --- a/services/go/pkg/enet/packet.go +++ b/pkg/enet/packet.go @@ -1,7 +1,8 @@ package enet /* -#cgo LDFLAGS: -lenet +#cgo LDFLAGS: -L./enet -lenet +#cgo CFLAGS: -I./enet/include #include */ import "C" diff --git a/services/go/pkg/enet/peer.go b/pkg/enet/peer.go similarity index 91% rename from services/go/pkg/enet/peer.go rename to pkg/enet/peer.go index 530b8572b..c6bb0ff7b 100644 --- a/services/go/pkg/enet/peer.go +++ b/pkg/enet/peer.go @@ -1,15 +1,16 @@ package enet /* -#cgo LDFLAGS: -lenet +#cgo LDFLAGS: -L./enet -lenet +#cgo CFLAGS: -I./enet/include #include #include int isCommandPresent(ENetPeer *peer, enet_uint16 seq) { ENetOutgoingCommand * outgoingCommand = NULL; ENetListIterator currentCommand; - for (currentCommand = enet_list_begin(&peer->outgoingReliableCommands); - currentCommand != enet_list_end(&peer->outgoingReliableCommands); + for (currentCommand = enet_list_begin(&peer->outgoingSendReliableCommands); + currentCommand != enet_list_end(&peer->outgoingSendReliableCommands); currentCommand = enet_list_next(currentCommand)) { outgoingCommand = (ENetOutgoingCommand *) currentCommand; diff --git a/services/go/pkg/enet/socket.go b/pkg/enet/socket.go similarity index 98% rename from services/go/pkg/enet/socket.go rename to pkg/enet/socket.go index 6888d37fe..0db38c122 100644 --- a/services/go/pkg/enet/socket.go +++ b/pkg/enet/socket.go @@ -1,11 +1,11 @@ package enet /* -#cgo LDFLAGS: -lenet +#cgo LDFLAGS: -L./enet -lenet +#cgo CFLAGS: -I./enet/include +#include #include #include -#include - ENetSocket _initConnectSocket(const char *host, int port) { ENetAddress serverAddress = { ENET_HOST_ANY, ENET_PORT_ANY }; @@ -58,6 +58,8 @@ int sendSocket(ENetSocket sock, const void * req, size_t numBytes) { if(reqlen <= 0) break; } } + + return 0; } int receiveSocket(ENetSocket sock, void * data, size_t maxSize) { diff --git a/services/go/pkg/game/colors.go b/pkg/game/colors.go similarity index 100% rename from services/go/pkg/game/colors.go rename to pkg/game/colors.go diff --git a/services/go/pkg/game/commands/command_test.go b/pkg/game/commands/command_test.go similarity index 100% rename from services/go/pkg/game/commands/command_test.go rename to pkg/game/commands/command_test.go diff --git a/services/go/pkg/game/commands/module.go b/pkg/game/commands/module.go similarity index 100% rename from services/go/pkg/game/commands/module.go rename to pkg/game/commands/module.go diff --git a/services/go/pkg/game/constants/module.go b/pkg/game/constants/module.go similarity index 100% rename from services/go/pkg/game/constants/module.go rename to 
pkg/game/constants/module.go diff --git a/services/go/pkg/game/io/buffer.go b/pkg/game/io/buffer.go similarity index 100% rename from services/go/pkg/game/io/buffer.go rename to pkg/game/io/buffer.go diff --git a/services/go/pkg/game/io/conversion.go b/pkg/game/io/conversion.go similarity index 100% rename from services/go/pkg/game/io/conversion.go rename to pkg/game/io/conversion.go diff --git a/services/go/pkg/game/io/packet.go b/pkg/game/io/packet.go similarity index 100% rename from services/go/pkg/game/io/packet.go rename to pkg/game/io/packet.go diff --git a/services/go/pkg/game/io/sanitize.go b/pkg/game/io/sanitize.go similarity index 100% rename from services/go/pkg/game/io/sanitize.go rename to pkg/game/io/sanitize.go diff --git a/services/go/pkg/game/io/serde.go b/pkg/game/io/serde.go similarity index 100% rename from services/go/pkg/game/io/serde.go rename to pkg/game/io/serde.go diff --git a/services/go/pkg/game/io/serde_test.go b/pkg/game/io/serde_test.go similarity index 100% rename from services/go/pkg/game/io/serde_test.go rename to pkg/game/io/serde_test.go diff --git a/services/go/pkg/game/protocol/constants.go b/pkg/game/protocol/constants.go similarity index 100% rename from services/go/pkg/game/protocol/constants.go rename to pkg/game/protocol/constants.go diff --git a/services/go/pkg/game/protocol/editing.go b/pkg/game/protocol/editing.go similarity index 100% rename from services/go/pkg/game/protocol/editing.go rename to pkg/game/protocol/editing.go diff --git a/services/go/pkg/game/protocol/intercept.go b/pkg/game/protocol/intercept.go similarity index 100% rename from services/go/pkg/game/protocol/intercept.go rename to pkg/game/protocol/intercept.go diff --git a/services/go/pkg/game/protocol/io.go b/pkg/game/protocol/io.go similarity index 100% rename from services/go/pkg/game/protocol/io.go rename to pkg/game/protocol/io.go diff --git a/services/go/pkg/game/protocol/module.go b/pkg/game/protocol/module.go similarity index 100% rename from services/go/pkg/game/protocol/module.go rename to pkg/game/protocol/module.go diff --git a/services/go/pkg/game/protocol/physics.go b/pkg/game/protocol/physics.go similarity index 100% rename from services/go/pkg/game/protocol/physics.go rename to pkg/game/protocol/physics.go diff --git a/services/go/pkg/game/variables/module.go b/pkg/game/variables/module.go similarity index 100% rename from services/go/pkg/game/variables/module.go rename to pkg/game/variables/module.go diff --git a/services/go/pkg/maps/api/entities/module.go b/pkg/maps/api/entities/module.go similarity index 100% rename from services/go/pkg/maps/api/entities/module.go rename to pkg/maps/api/entities/module.go diff --git a/services/go/pkg/maps/api/entities/particles.go b/pkg/maps/api/entities/particles.go similarity index 100% rename from services/go/pkg/maps/api/entities/particles.go rename to pkg/maps/api/entities/particles.go diff --git a/services/go/pkg/maps/api/entities/serde.go b/pkg/maps/api/entities/serde.go similarity index 100% rename from services/go/pkg/maps/api/entities/serde.go rename to pkg/maps/api/entities/serde.go diff --git a/services/go/pkg/maps/api/entities/serde_test.go b/pkg/maps/api/entities/serde_test.go similarity index 100% rename from services/go/pkg/maps/api/entities/serde_test.go rename to pkg/maps/api/entities/serde_test.go diff --git a/services/go/pkg/maps/api/module.go b/pkg/maps/api/module.go similarity index 100% rename from services/go/pkg/maps/api/module.go rename to pkg/maps/api/module.go diff --git 
a/services/go/pkg/maps/decode.go b/pkg/maps/decode.go similarity index 100% rename from services/go/pkg/maps/decode.go rename to pkg/maps/decode.go diff --git a/services/go/pkg/maps/default.textures b/pkg/maps/default.textures similarity index 100% rename from services/go/pkg/maps/default.textures rename to pkg/maps/default.textures diff --git a/services/go/pkg/maps/edit.go b/pkg/maps/edit.go similarity index 100% rename from services/go/pkg/maps/edit.go rename to pkg/maps/edit.go diff --git a/services/go/pkg/maps/encode.go b/pkg/maps/encode.go similarity index 100% rename from services/go/pkg/maps/encode.go rename to pkg/maps/encode.go diff --git a/services/go/pkg/maps/translate.go b/pkg/maps/translate.go similarity index 100% rename from services/go/pkg/maps/translate.go rename to pkg/maps/translate.go diff --git a/services/go/pkg/maps/types.go b/pkg/maps/types.go similarity index 100% rename from services/go/pkg/maps/types.go rename to pkg/maps/types.go diff --git a/services/go/pkg/maps/worldio/blend.cpp b/pkg/maps/worldio/blend.cpp similarity index 100% rename from services/go/pkg/maps/worldio/blend.cpp rename to pkg/maps/worldio/blend.cpp diff --git a/services/go/pkg/maps/worldio/command.h b/pkg/maps/worldio/command.h similarity index 100% rename from services/go/pkg/maps/worldio/command.h rename to pkg/maps/worldio/command.h diff --git a/services/go/pkg/maps/worldio/cube.h b/pkg/maps/worldio/cube.h similarity index 100% rename from services/go/pkg/maps/worldio/cube.h rename to pkg/maps/worldio/cube.h diff --git a/services/go/pkg/maps/worldio/engine.h b/pkg/maps/worldio/engine.h similarity index 100% rename from services/go/pkg/maps/worldio/engine.h rename to pkg/maps/worldio/engine.h diff --git a/services/go/pkg/maps/worldio/entities.cpp b/pkg/maps/worldio/entities.cpp similarity index 100% rename from services/go/pkg/maps/worldio/entities.cpp rename to pkg/maps/worldio/entities.cpp diff --git a/services/go/pkg/maps/worldio/ents.h b/pkg/maps/worldio/ents.h similarity index 100% rename from services/go/pkg/maps/worldio/ents.h rename to pkg/maps/worldio/ents.h diff --git a/services/go/pkg/maps/worldio/game.h b/pkg/maps/worldio/game.h similarity index 100% rename from services/go/pkg/maps/worldio/game.h rename to pkg/maps/worldio/game.h diff --git a/services/go/pkg/maps/worldio/geom.h b/pkg/maps/worldio/geom.h similarity index 100% rename from services/go/pkg/maps/worldio/geom.h rename to pkg/maps/worldio/geom.h diff --git a/services/go/pkg/maps/worldio/iengine.h b/pkg/maps/worldio/iengine.h similarity index 100% rename from services/go/pkg/maps/worldio/iengine.h rename to pkg/maps/worldio/iengine.h diff --git a/services/go/pkg/maps/worldio/igame.h b/pkg/maps/worldio/igame.h similarity index 100% rename from services/go/pkg/maps/worldio/igame.h rename to pkg/maps/worldio/igame.h diff --git a/services/go/pkg/maps/worldio/lightmap.cpp b/pkg/maps/worldio/lightmap.cpp similarity index 100% rename from services/go/pkg/maps/worldio/lightmap.cpp rename to pkg/maps/worldio/lightmap.cpp diff --git a/services/go/pkg/maps/worldio/lightmap.h b/pkg/maps/worldio/lightmap.h similarity index 100% rename from services/go/pkg/maps/worldio/lightmap.h rename to pkg/maps/worldio/lightmap.h diff --git a/services/go/pkg/maps/worldio/octa.cpp b/pkg/maps/worldio/octa.cpp similarity index 100% rename from services/go/pkg/maps/worldio/octa.cpp rename to pkg/maps/worldio/octa.cpp diff --git a/services/go/pkg/maps/worldio/octa.h b/pkg/maps/worldio/octa.h similarity index 100% rename from 
services/go/pkg/maps/worldio/octa.h rename to pkg/maps/worldio/octa.h diff --git a/services/go/pkg/maps/worldio/octaedit.cpp b/pkg/maps/worldio/octaedit.cpp similarity index 100% rename from services/go/pkg/maps/worldio/octaedit.cpp rename to pkg/maps/worldio/octaedit.cpp diff --git a/services/go/pkg/maps/worldio/pvs.cpp b/pkg/maps/worldio/pvs.cpp similarity index 100% rename from services/go/pkg/maps/worldio/pvs.cpp rename to pkg/maps/worldio/pvs.cpp diff --git a/services/go/pkg/maps/worldio/state.h b/pkg/maps/worldio/state.h similarity index 100% rename from services/go/pkg/maps/worldio/state.h rename to pkg/maps/worldio/state.h diff --git a/services/go/pkg/maps/worldio/texture.h b/pkg/maps/worldio/texture.h similarity index 100% rename from services/go/pkg/maps/worldio/texture.h rename to pkg/maps/worldio/texture.h diff --git a/services/go/pkg/maps/worldio/tools.h b/pkg/maps/worldio/tools.h similarity index 100% rename from services/go/pkg/maps/worldio/tools.h rename to pkg/maps/worldio/tools.h diff --git a/services/go/pkg/maps/worldio/world.cpp b/pkg/maps/worldio/world.cpp similarity index 100% rename from services/go/pkg/maps/worldio/world.cpp rename to pkg/maps/worldio/world.cpp diff --git a/services/go/pkg/maps/worldio/world.h b/pkg/maps/worldio/world.h similarity index 100% rename from services/go/pkg/maps/worldio/world.h rename to pkg/maps/worldio/world.h diff --git a/services/go/pkg/maps/worldio/worldio.cpp b/pkg/maps/worldio/worldio.cpp similarity index 100% rename from services/go/pkg/maps/worldio/worldio.cpp rename to pkg/maps/worldio/worldio.cpp diff --git a/services/go/pkg/maps/worldio/worldio.go b/pkg/maps/worldio/worldio.go similarity index 100% rename from services/go/pkg/maps/worldio/worldio.go rename to pkg/maps/worldio/worldio.go diff --git a/services/go/pkg/maps/worldio/worldio.h b/pkg/maps/worldio/worldio.h similarity index 100% rename from services/go/pkg/maps/worldio/worldio.h rename to pkg/maps/worldio/worldio.h diff --git a/services/go/pkg/maps/worldio/worldio.swigcxx b/pkg/maps/worldio/worldio.swigcxx similarity index 100% rename from services/go/pkg/maps/worldio/worldio.swigcxx rename to pkg/maps/worldio/worldio.swigcxx diff --git a/services/go/pkg/maps/worldio/wrap.h b/pkg/maps/worldio/wrap.h similarity index 100% rename from services/go/pkg/maps/worldio/wrap.h rename to pkg/maps/worldio/wrap.h diff --git a/services/go/pkg/min/commands.go b/pkg/min/commands.go similarity index 100% rename from services/go/pkg/min/commands.go rename to pkg/min/commands.go diff --git a/services/go/pkg/min/index.go b/pkg/min/index.go similarity index 100% rename from services/go/pkg/min/index.go rename to pkg/min/index.go diff --git a/services/go/pkg/min/model.go b/pkg/min/model.go similarity index 100% rename from services/go/pkg/min/model.go rename to pkg/min/model.go diff --git a/services/go/pkg/min/module.go b/pkg/min/module.go similarity index 100% rename from services/go/pkg/min/module.go rename to pkg/min/module.go diff --git a/services/go/pkg/min/textures.go b/pkg/min/textures.go similarity index 100% rename from services/go/pkg/min/textures.go rename to pkg/min/textures.go diff --git a/services/go/pkg/mmr/module.go b/pkg/mmr/module.go similarity index 100% rename from services/go/pkg/mmr/module.go rename to pkg/mmr/module.go diff --git a/services/go/pkg/server/LICENSE b/pkg/server/LICENSE similarity index 100% rename from services/go/pkg/server/LICENSE rename to pkg/server/LICENSE diff --git a/services/go/pkg/server/README.md b/pkg/server/README.md similarity index 100% 
rename from services/go/pkg/server/README.md rename to pkg/server/README.md diff --git a/services/go/pkg/server/auth.go b/pkg/server/auth.go similarity index 100% rename from services/go/pkg/server/auth.go rename to pkg/server/auth.go diff --git a/services/go/pkg/server/client.go b/pkg/server/client.go similarity index 100% rename from services/go/pkg/server/client.go rename to pkg/server/client.go diff --git a/services/go/pkg/server/clients.go b/pkg/server/clients.go similarity index 100% rename from services/go/pkg/server/clients.go rename to pkg/server/clients.go diff --git a/services/go/pkg/server/config.go b/pkg/server/config.go similarity index 100% rename from services/go/pkg/server/config.go rename to pkg/server/config.go diff --git a/services/go/pkg/server/game.go b/pkg/server/game.go similarity index 100% rename from services/go/pkg/server/game.go rename to pkg/server/game.go diff --git a/services/go/pkg/server/game/capture.go b/pkg/server/game/capture.go similarity index 100% rename from services/go/pkg/server/game/capture.go rename to pkg/server/game/capture.go diff --git a/services/go/pkg/server/game/clock.go b/pkg/server/game/clock.go similarity index 100% rename from services/go/pkg/server/game/clock.go rename to pkg/server/game/clock.go diff --git a/services/go/pkg/server/game/coop.go b/pkg/server/game/coop.go similarity index 100% rename from services/go/pkg/server/game/coop.go rename to pkg/server/game/coop.go diff --git a/services/go/pkg/server/game/ctf.go b/pkg/server/game/ctf.go similarity index 100% rename from services/go/pkg/server/game/ctf.go rename to pkg/server/game/ctf.go diff --git a/services/go/pkg/server/game/deathmatch.go b/pkg/server/game/deathmatch.go similarity index 100% rename from services/go/pkg/server/game/deathmatch.go rename to pkg/server/game/deathmatch.go diff --git a/services/go/pkg/server/game/flags.go b/pkg/server/game/flags.go similarity index 100% rename from services/go/pkg/server/game/flags.go rename to pkg/server/game/flags.go diff --git a/services/go/pkg/server/game/flags_ctf.go b/pkg/server/game/flags_ctf.go similarity index 100% rename from services/go/pkg/server/game/flags_ctf.go rename to pkg/server/game/flags_ctf.go diff --git a/services/go/pkg/server/game/game_test.go b/pkg/server/game/game_test.go similarity index 100% rename from services/go/pkg/server/game/game_test.go rename to pkg/server/game/game_test.go diff --git a/services/go/pkg/server/game/mode.go b/pkg/server/game/mode.go similarity index 100% rename from services/go/pkg/server/game/mode.go rename to pkg/server/game/mode.go diff --git a/services/go/pkg/server/game/pickups.go b/pkg/server/game/pickups.go similarity index 100% rename from services/go/pkg/server/game/pickups.go rename to pkg/server/game/pickups.go diff --git a/services/go/pkg/server/game/player.go b/pkg/server/game/player.go similarity index 100% rename from services/go/pkg/server/game/player.go rename to pkg/server/game/player.go diff --git a/services/go/pkg/server/game/player_state.go b/pkg/server/game/player_state.go similarity index 100% rename from services/go/pkg/server/game/player_state.go rename to pkg/server/game/player_state.go diff --git a/services/go/pkg/server/game/server.go b/pkg/server/game/server.go similarity index 100% rename from services/go/pkg/server/game/server.go rename to pkg/server/game/server.go diff --git a/services/go/pkg/server/game/team.go b/pkg/server/game/team.go similarity index 100% rename from services/go/pkg/server/game/team.go rename to pkg/server/game/team.go diff 
--git a/services/go/pkg/server/game/team_mode.go b/pkg/server/game/team_mode.go similarity index 100% rename from services/go/pkg/server/game/team_mode.go rename to pkg/server/game/team_mode.go diff --git a/services/go/pkg/server/geom/vector.go b/pkg/server/geom/vector.go similarity index 100% rename from services/go/pkg/server/geom/vector.go rename to pkg/server/geom/vector.go diff --git a/services/go/pkg/server/module.go b/pkg/server/module.go similarity index 100% rename from services/go/pkg/server/module.go rename to pkg/server/module.go diff --git a/services/go/pkg/server/net/packet/packet.go b/pkg/server/net/packet/packet.go similarity index 100% rename from services/go/pkg/server/net/packet/packet.go rename to pkg/server/net/packet/packet.go diff --git a/services/go/pkg/server/packet_handling.go b/pkg/server/packet_handling.go similarity index 100% rename from services/go/pkg/server/packet_handling.go rename to pkg/server/packet_handling.go diff --git a/services/go/pkg/server/pausableticker/pausable_ticker.go b/pkg/server/pausableticker/pausable_ticker.go similarity index 100% rename from services/go/pkg/server/pausableticker/pausable_ticker.go rename to pkg/server/pausableticker/pausable_ticker.go diff --git a/services/go/pkg/server/protocol/armour/armour.go b/pkg/server/protocol/armour/armour.go similarity index 100% rename from services/go/pkg/server/protocol/armour/armour.go rename to pkg/server/protocol/armour/armour.go diff --git a/services/go/pkg/server/protocol/cubecode/README.md b/pkg/server/protocol/cubecode/README.md similarity index 100% rename from services/go/pkg/server/protocol/cubecode/README.md rename to pkg/server/protocol/cubecode/README.md diff --git a/services/go/pkg/server/protocol/cubecode/colors.go b/pkg/server/protocol/cubecode/colors.go similarity index 100% rename from services/go/pkg/server/protocol/cubecode/colors.go rename to pkg/server/protocol/cubecode/colors.go diff --git a/services/go/pkg/server/protocol/cubecode/conversion.go b/pkg/server/protocol/cubecode/conversion.go similarity index 100% rename from services/go/pkg/server/protocol/cubecode/conversion.go rename to pkg/server/protocol/cubecode/conversion.go diff --git a/services/go/pkg/server/protocol/cubecode/sanitize.go b/pkg/server/protocol/cubecode/sanitize.go similarity index 100% rename from services/go/pkg/server/protocol/cubecode/sanitize.go rename to pkg/server/protocol/cubecode/sanitize.go diff --git a/services/go/pkg/server/protocol/disconnectreason/disconnect_reasons.go b/pkg/server/protocol/disconnectreason/disconnect_reasons.go similarity index 100% rename from services/go/pkg/server/protocol/disconnectreason/disconnect_reasons.go rename to pkg/server/protocol/disconnectreason/disconnect_reasons.go diff --git a/services/go/pkg/server/protocol/entity/entities.go b/pkg/server/protocol/entity/entities.go similarity index 100% rename from services/go/pkg/server/protocol/entity/entities.go rename to pkg/server/protocol/entity/entities.go diff --git a/services/go/pkg/server/protocol/entity/pickups.go b/pkg/server/protocol/entity/pickups.go similarity index 100% rename from services/go/pkg/server/protocol/entity/pickups.go rename to pkg/server/protocol/entity/pickups.go diff --git a/services/go/pkg/server/protocol/gamemode/game_mode.go b/pkg/server/protocol/gamemode/game_mode.go similarity index 100% rename from services/go/pkg/server/protocol/gamemode/game_mode.go rename to pkg/server/protocol/gamemode/game_mode.go diff --git a/services/go/pkg/server/protocol/mastermode/master_mode.go 
b/pkg/server/protocol/mastermode/master_mode.go similarity index 100% rename from services/go/pkg/server/protocol/mastermode/master_mode.go rename to pkg/server/protocol/mastermode/master_mode.go diff --git a/services/go/pkg/server/protocol/nmc/network_message_codes.go b/pkg/server/protocol/nmc/network_message_codes.go similarity index 100% rename from services/go/pkg/server/protocol/nmc/network_message_codes.go rename to pkg/server/protocol/nmc/network_message_codes.go diff --git a/services/go/pkg/server/protocol/packet.go b/pkg/server/protocol/packet.go similarity index 100% rename from services/go/pkg/server/protocol/packet.go rename to pkg/server/protocol/packet.go diff --git a/services/go/pkg/server/protocol/playerstate/player_state.go b/pkg/server/protocol/playerstate/player_state.go similarity index 100% rename from services/go/pkg/server/protocol/playerstate/player_state.go rename to pkg/server/protocol/playerstate/player_state.go diff --git a/services/go/pkg/server/protocol/role/role.go b/pkg/server/protocol/role/role.go similarity index 100% rename from services/go/pkg/server/protocol/role/role.go rename to pkg/server/protocol/role/role.go diff --git a/services/go/pkg/server/protocol/sound/sound.go b/pkg/server/protocol/sound/sound.go similarity index 100% rename from services/go/pkg/server/protocol/sound/sound.go rename to pkg/server/protocol/sound/sound.go diff --git a/services/go/pkg/server/protocol/version.go b/pkg/server/protocol/version.go similarity index 100% rename from services/go/pkg/server/protocol/version.go rename to pkg/server/protocol/version.go diff --git a/services/go/pkg/server/protocol/weapon/weapon.go b/pkg/server/protocol/weapon/weapon.go similarity index 100% rename from services/go/pkg/server/protocol/weapon/weapon.go rename to pkg/server/protocol/weapon/weapon.go diff --git a/services/go/pkg/server/relay/publisher.go b/pkg/server/relay/publisher.go similarity index 100% rename from services/go/pkg/server/relay/publisher.go rename to pkg/server/relay/publisher.go diff --git a/services/go/pkg/server/relay/relay.go b/pkg/server/relay/relay.go similarity index 100% rename from services/go/pkg/server/relay/relay.go rename to pkg/server/relay/relay.go diff --git a/services/go/pkg/server/server_commands.go b/pkg/server/server_commands.go similarity index 100% rename from services/go/pkg/server/server_commands.go rename to pkg/server/server_commands.go diff --git a/services/go/pkg/server/state.go b/pkg/server/state.go similarity index 100% rename from services/go/pkg/server/state.go rename to pkg/server/state.go diff --git a/services/go/pkg/server/timer/LICENSE b/pkg/server/timer/LICENSE similarity index 100% rename from services/go/pkg/server/timer/LICENSE rename to pkg/server/timer/LICENSE diff --git a/services/go/pkg/server/timer/README.md b/pkg/server/timer/README.md similarity index 100% rename from services/go/pkg/server/timer/README.md rename to pkg/server/timer/README.md diff --git a/services/go/pkg/server/timer/module.go b/pkg/server/timer/module.go similarity index 100% rename from services/go/pkg/server/timer/module.go rename to pkg/server/timer/module.go diff --git a/services/go/pkg/utils/colors.go b/pkg/utils/colors.go similarity index 100% rename from services/go/pkg/utils/colors.go rename to pkg/utils/colors.go diff --git a/services/go/pkg/utils/hash.go b/pkg/utils/hash.go similarity index 100% rename from services/go/pkg/utils/hash.go rename to pkg/utils/hash.go diff --git a/services/go/pkg/utils/pubsub.go b/pkg/utils/pubsub.go similarity index 
100% rename from services/go/pkg/utils/pubsub.go rename to pkg/utils/pubsub.go diff --git a/services/go/pkg/utils/session.go b/pkg/utils/session.go similarity index 100% rename from services/go/pkg/utils/session.go rename to pkg/utils/session.go diff --git a/pkg/version/module.go b/pkg/version/module.go new file mode 100644 index 000000000..03804ddc5 --- /dev/null +++ b/pkg/version/module.go @@ -0,0 +1,9 @@ +package version + +// These should be set via go build -ldflags -X 'xxxx'. +var ( + Version = "development" + GoVersion = "1.21" + GitCommit = "unknown" + BuildTime = "1979-01-09T16:09:53+00:00" +) diff --git a/services/proxy/.gitignore b/proxy/.gitignore similarity index 100% rename from services/proxy/.gitignore rename to proxy/.gitignore diff --git a/services/proxy/Makefile b/proxy/Makefile similarity index 100% rename from services/proxy/Makefile rename to proxy/Makefile diff --git a/services/proxy/README.md b/proxy/README.md similarity index 100% rename from services/proxy/README.md rename to proxy/README.md diff --git a/services/proxy/base64.c b/proxy/base64.c similarity index 100% rename from services/proxy/base64.c rename to proxy/base64.c diff --git a/services/proxy/build b/proxy/build similarity index 100% rename from services/proxy/build rename to proxy/build diff --git a/services/proxy/md5.c b/proxy/md5.c similarity index 100% rename from services/proxy/md5.c rename to proxy/md5.c diff --git a/services/proxy/md5.h b/proxy/md5.h similarity index 100% rename from services/proxy/md5.h rename to proxy/md5.h diff --git a/services/proxy/sha1.c b/proxy/sha1.c similarity index 100% rename from services/proxy/sha1.c rename to proxy/sha1.c diff --git a/services/proxy/sha1.h b/proxy/sha1.h similarity index 100% rename from services/proxy/sha1.h rename to proxy/sha1.h diff --git a/services/proxy/websocket.c b/proxy/websocket.c similarity index 100% rename from services/proxy/websocket.c rename to proxy/websocket.c diff --git a/services/proxy/websocket.h b/proxy/websocket.h similarity index 100% rename from services/proxy/websocket.h rename to proxy/websocket.h diff --git a/services/proxy/websockify.c b/proxy/websockify.c similarity index 100% rename from services/proxy/websockify.c rename to proxy/websockify.c diff --git a/serve b/serve deleted file mode 100755 index 88be312c3..000000000 --- a/serve +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/env bash - -earthly +cpp -earthly +emscripten -earthly +go -docker-compose -f services/docker-compose.yml up diff --git a/services/docker-compose.yml b/services/docker-compose.yml deleted file mode 100644 index 1b9deda5e..000000000 --- a/services/docker-compose.yml +++ /dev/null @@ -1,66 +0,0 @@ -version: "3" - -services: - ingress: - image: nginx:stable-alpine - volumes: - - "./ingress/local.conf:/etc/nginx/conf.d/default.conf" - network_mode: host - - server: - image: sour:cpp - command: "watch /server ./qserv" - volumes: - - "../watch:/usr/bin/watch" - - "./server:/server" - network_mode: host - - proxy: - image: sour:cpp - command: "watch /proxy ./wsproxy 28785" - volumes: - - "../watch:/usr/bin/watch" - - "./proxy:/proxy" - network_mode: host - - cluster: - image: sour:go - command: "watch /go ./cluster" - volumes: - - "../watch:/usr/bin/watch" - - "./go:/go" - network_mode: host - - game: - image: sour:emscripten - command: "watch /game cat" - volumes: - - "../watch:/usr/bin/watch" - - "./game/cube2:/game" - - "../build/game:/game/dist/game" - - "../build/emscripten-cache:/emsdk/upstream/emscripten/cache" - network_mode: host - - assets: - 
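(Note on the new pkg/version/module.go above: as its comment says, the values there are only defaults meant to be overridden at link time with go build -ldflags. A minimal sketch of doing that by hand is below; the full module path github.com/cfoust/sour, the ./cmd/server entrypoint, and the version string are assumptions for illustration, not taken from this patch.)

# Sketch only: inject real values into the pkg/version variables at link time.
# Module path and entrypoint are assumed from the surrounding repository layout;
# v0.1.0 is a placeholder version string.
go build -o sour \
  -ldflags "-X github.com/cfoust/sour/pkg/version.Version=v0.1.0 -X github.com/cfoust/sour/pkg/version.GitCommit=$(git rev-parse --short HEAD)" \
  ./cmd/server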
image: sour:emscripten - command: "watch /assets cat" - volumes: - - "../watch:/usr/bin/watch" - - "./game/assets:/assets" - - "../build/assets:/assets/output" - network_mode: host - - client: - image: node:14.17.5 - environment: - IN_DOCKER: 1 - command: "/client/build" - volumes: - - "./client:/client" - - "../build:/client/dist" - - "../build/client-node-modules:/client/node_modules" - network_mode: host - tty: true - stdin_open: true - -# vim: expandtab tabstop=2 softtabstop=2 shiftwidth=2 diff --git a/services/go/.gitignore b/services/go/.gitignore deleted file mode 100644 index fadcaa06c..000000000 --- a/services/go/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -/sourdump -/cluster -/shims -/cache -/state.db diff --git a/services/go/build b/services/go/build deleted file mode 100755 index 1841dc885..000000000 --- a/services/go/build +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env bash - -set -e - -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" - -cd "$SCRIPT_DIR" -if [ -z "$GITPOD_WORKSPACE_ID" ]; then - unset GOPATH -fi - -go build -gcflags="-e" -o cluster svc/cluster/main.go -go build -gcflags="-e" -o sourdump cmd/sourdump/main.go diff --git a/services/go/config.json b/services/go/config.json deleted file mode 100644 index e69de29bb..000000000 diff --git a/services/ingress/build b/services/ingress/build deleted file mode 100755 index e8cda4b63..000000000 --- a/services/ingress/build +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash - -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" - -cd "$SCRIPT_DIR" - -CONFIG="$SCRIPT_DIR/local.conf" -if ! [ -z "$GITPOD_WORKSPACE_ID" ]; then - CONFIG="$SCRIPT_DIR/gitpod.conf" -fi - -sudo nginx -g 'daemon off;' -c "$CONFIG" diff --git a/services/ingress/gitpod.conf b/services/ingress/gitpod.conf deleted file mode 100644 index 503a29e10..000000000 --- a/services/ingress/gitpod.conf +++ /dev/null @@ -1,46 +0,0 @@ -events { - worker_connections 768; - # multi_accept on; -} - -http { - error_log /var/log/nginx/error.log debug; - map $http_upgrade $connection_upgrade { - default Upgrade; - '' close; - } - - include /etc/nginx/mime.types; - - server { - listen 1234; - - location / { - root /workspace/sour/services/client/dist/; - index index.html; - gzip_static on; - try_files $uri $uri/ /index.html; - } - - location ~ /service/proxy/(u/\w+:\d+)? { - rewrite /service/proxy/(.*) /$1 break; - proxy_pass http://0.0.0.0:28785; - proxy_http_version 1.1; - proxy_set_header Host $host; - proxy_set_header Origin localhost; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - proxy_set_header Sec-WebSocket-Protocol binary; - } - - location /service/cluster/ { - rewrite /service/cluster/(.*) /$1 break; - proxy_pass http://0.0.0.0:29999; - proxy_http_version 1.1; - proxy_set_header Host $host; - proxy_set_header Origin localhost; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - } - } -} diff --git a/services/ingress/local.conf b/services/ingress/local.conf deleted file mode 100644 index c1c1f814f..000000000 --- a/services/ingress/local.conf +++ /dev/null @@ -1,33 +0,0 @@ -map $http_upgrade $connection_upgrade { - default Upgrade; - '' close; -} - -server { - listen 1234; - - location / { - proxy_pass http://0.0.0.0:1235; - } - - location ~ /service/proxy/(u/\w+:\d+)? 
{ - rewrite /service/proxy/(.*) /$1 break; - proxy_pass http://0.0.0.0:28785; - proxy_http_version 1.1; - proxy_set_header Host $host; - proxy_set_header Origin localhost; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - proxy_set_header Sec-WebSocket-Protocol binary; - } - - location = /service/cluster/ { - rewrite /service/cluster/(.*) /$1 break; - proxy_pass http://0.0.0.0:29999; - proxy_http_version 1.1; - proxy_set_header Host $host; - proxy_set_header Origin localhost; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - } -} diff --git a/services/ingress/production.conf b/services/ingress/production.conf deleted file mode 100644 index 5f2886768..000000000 --- a/services/ingress/production.conf +++ /dev/null @@ -1,37 +0,0 @@ -map $http_upgrade $connection_upgrade { - default Upgrade; - '' close; -} - -include /etc/nginx/mime.types; - -server { - listen 1234; - - location / { - root /app/; - index index.html; - try_files $uri $uri/ /index.html; - } - - location /service/proxy/ { - rewrite /service/proxy/(.*) /$1 break; - proxy_pass http://0.0.0.0:28785; - proxy_http_version 1.1; - proxy_set_header Host $host; - proxy_set_header Origin localhost; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - proxy_set_header Sec-WebSocket-Protocol binary; - } - - location /service/cluster/ { - rewrite /service/cluster/(.*) /$1 break; - proxy_pass http://0.0.0.0:29999; - proxy_http_version 1.1; - proxy_set_header Host $host; - proxy_set_header Origin localhost; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - } -} diff --git a/services/redis/redis.conf b/services/redis/redis.conf deleted file mode 100644 index 600adf2c3..000000000 --- a/services/redis/redis.conf +++ /dev/null @@ -1,2278 +0,0 @@ -# Redis configuration file example. -# -# Note that in order to read the configuration file, Redis must be -# started with the file path as first argument: -# -# ./redis-server /path/to/redis.conf - -# Note on units: when memory size is needed, it is possible to specify -# it in the usual form of 1k 5GB 4M and so forth: -# -# 1k => 1000 bytes -# 1kb => 1024 bytes -# 1m => 1000000 bytes -# 1mb => 1024*1024 bytes -# 1g => 1000000000 bytes -# 1gb => 1024*1024*1024 bytes -# -# units are case insensitive so 1GB 1Gb 1gB are all the same. - -################################## INCLUDES ################################### - -# Include one or more other config files here. This is useful if you -# have a standard template that goes to all Redis servers but also need -# to customize a few per-server settings. Include files can include -# other files, so use this wisely. -# -# Note that option "include" won't be rewritten by command "CONFIG REWRITE" -# from admin or Redis Sentinel. Since Redis always uses the last processed -# line as value of a configuration directive, you'd better put includes -# at the beginning of this file to avoid overwriting config change at runtime. -# -# If instead you are interested in using includes to override configuration -# options, it is better to use include as the last line. -# -# Included paths may contain wildcards. All files matching the wildcards will -# be included in alphabetical order. -# Note that if an include path contains a wildcards but no files match it when -# the server is started, the include statement will be ignored and no error will -# be emitted. 
It is safe, therefore, to include wildcard files from empty -# directories. -# -# include /path/to/local.conf -# include /path/to/other.conf -# include /path/to/fragments/*.conf -# - -################################## MODULES ##################################### - -# Load modules at startup. If the server is not able to load modules -# it will abort. It is possible to use multiple loadmodule directives. -# -# loadmodule /path/to/my_module.so -# loadmodule /path/to/other_module.so - -################################## NETWORK ##################################### - -# By default, if no "bind" configuration directive is specified, Redis listens -# for connections from all available network interfaces on the host machine. -# It is possible to listen to just one or multiple selected interfaces using -# the "bind" configuration directive, followed by one or more IP addresses. -# Each address can be prefixed by "-", which means that redis will not fail to -# start if the address is not available. Being not available only refers to -# addresses that does not correspond to any network interface. Addresses that -# are already in use will always fail, and unsupported protocols will always BE -# silently skipped. -# -# Examples: -# -# bind 192.168.1.100 10.0.0.1 # listens on two specific IPv4 addresses -# bind 127.0.0.1 ::1 # listens on loopback IPv4 and IPv6 -# bind * -::* # like the default, all available interfaces -# -# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the -# internet, binding to all the interfaces is dangerous and will expose the -# instance to everybody on the internet. So by default we uncomment the -# following bind directive, that will force Redis to listen only on the -# IPv4 and IPv6 (if available) loopback interface addresses (this means Redis -# will only be able to accept client connections from the same host that it is -# running on). -# -# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES -# COMMENT OUT THE FOLLOWING LINE. -# -# You will also need to set a password unless you explicitly disable protected -# mode. -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -bind 127.0.0.1 -::1 - -# By default, outgoing connections (from replica to master, from Sentinel to -# instances, cluster bus, etc.) are not bound to a specific local address. In -# most cases, this means the operating system will handle that based on routing -# and the interface through which the connection goes out. -# -# Using bind-source-addr it is possible to configure a specific address to bind -# to, which may also affect how the connection gets routed. -# -# Example: -# -# bind-source-addr 10.0.0.1 - -# Protected mode is a layer of security protection, in order to avoid that -# Redis instances left open on the internet are accessed and exploited. -# -# When protected mode is on and the default user has no password, the server -# only accepts local connections from the IPv4 address (127.0.0.1), IPv6 address -# (::1) or Unix domain sockets. -# -# By default protected mode is enabled. You should disable it only if -# you are sure you want clients from other hosts to connect to Redis -# even if no authentication is configured. -protected-mode yes - -# Redis uses default hardened security configuration directives to reduce the -# attack surface on innocent users. Therefore, several sensitive configuration -# directives are immutable, and some potentially-dangerous commands are blocked. 
-# -# Configuration directives that control files that Redis writes to (e.g., 'dir' -# and 'dbfilename') and that aren't usually modified during runtime -# are protected by making them immutable. -# -# Commands that can increase the attack surface of Redis and that aren't usually -# called by users are blocked by default. -# -# These can be exposed to either all connections or just local ones by setting -# each of the configs listed below to either of these values: -# -# no - Block for any connection (remain immutable) -# yes - Allow for any connection (no protection) -# local - Allow only for local connections. Ones originating from the -# IPv4 address (127.0.0.1), IPv6 address (::1) or Unix domain sockets. -# -# enable-protected-configs no -# enable-debug-command no -# enable-module-command no - -# Accept connections on the specified port, default is 6379 (IANA #815344). -# If port 0 is specified Redis will not listen on a TCP socket. -port 6379 - -# TCP listen() backlog. -# -# In high requests-per-second environments you need a high backlog in order -# to avoid slow clients connection issues. Note that the Linux kernel -# will silently truncate it to the value of /proc/sys/net/core/somaxconn so -# make sure to raise both the value of somaxconn and tcp_max_syn_backlog -# in order to get the desired effect. -tcp-backlog 511 - -# Unix socket. -# -# Specify the path for the Unix socket that will be used to listen for -# incoming connections. There is no default, so Redis will not listen -# on a unix socket when not specified. -# -# unixsocket /run/redis.sock -# unixsocketperm 700 - -# Close the connection after a client is idle for N seconds (0 to disable) -timeout 0 - -# TCP keepalive. -# -# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence -# of communication. This is useful for two reasons: -# -# 1) Detect dead peers. -# 2) Force network equipment in the middle to consider the connection to be -# alive. -# -# On Linux, the specified value (in seconds) is the period used to send ACKs. -# Note that to close the connection the double of the time is needed. -# On other kernels the period depends on the kernel configuration. -# -# A reasonable value for this option is 300 seconds, which is the new -# Redis default starting with Redis 3.2.1. -tcp-keepalive 300 - -# Apply OS-specific mechanism to mark the listening socket with the specified -# ID, to support advanced routing and filtering capabilities. -# -# On Linux, the ID represents a connection mark. -# On FreeBSD, the ID represents a socket cookie ID. -# On OpenBSD, the ID represents a route table ID. -# -# The default value is 0, which implies no marking is required. -# socket-mark-id 0 - -################################# TLS/SSL ##################################### - -# By default, TLS/SSL is disabled. To enable it, the "tls-port" configuration -# directive can be used to define TLS-listening ports. To enable TLS on the -# default port, use: -# -# port 0 -# tls-port 6379 - -# Configure a X.509 certificate and private key to use for authenticating the -# server to connected clients, masters or cluster peers. These files should be -# PEM formatted. -# -# tls-cert-file redis.crt -# tls-key-file redis.key -# -# If the key file is encrypted using a passphrase, it can be included here -# as well. -# -# tls-key-file-pass secret - -# Normally Redis uses the same certificate for both server functions (accepting -# connections) and client functions (replicating from a master, establishing -# cluster bus connections, etc.). 
-# -# Sometimes certificates are issued with attributes that designate them as -# client-only or server-only certificates. In that case it may be desired to use -# different certificates for incoming (server) and outgoing (client) -# connections. To do that, use the following directives: -# -# tls-client-cert-file client.crt -# tls-client-key-file client.key -# -# If the key file is encrypted using a passphrase, it can be included here -# as well. -# -# tls-client-key-file-pass secret - -# Configure a DH parameters file to enable Diffie-Hellman (DH) key exchange, -# required by older versions of OpenSSL (<3.0). Newer versions do not require -# this configuration and recommend against it. -# -# tls-dh-params-file redis.dh - -# Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL -# clients and peers. Redis requires an explicit configuration of at least one -# of these, and will not implicitly use the system wide configuration. -# -# tls-ca-cert-file ca.crt -# tls-ca-cert-dir /etc/ssl/certs - -# By default, clients (including replica servers) on a TLS port are required -# to authenticate using valid client side certificates. -# -# If "no" is specified, client certificates are not required and not accepted. -# If "optional" is specified, client certificates are accepted and must be -# valid if provided, but are not required. -# -# tls-auth-clients no -# tls-auth-clients optional - -# By default, a Redis replica does not attempt to establish a TLS connection -# with its master. -# -# Use the following directive to enable TLS on replication links. -# -# tls-replication yes - -# By default, the Redis Cluster bus uses a plain TCP connection. To enable -# TLS for the bus protocol, use the following directive: -# -# tls-cluster yes - -# By default, only TLSv1.2 and TLSv1.3 are enabled and it is highly recommended -# that older formally deprecated versions are kept disabled to reduce the attack surface. -# You can explicitly specify TLS versions to support. -# Allowed values are case insensitive and include "TLSv1", "TLSv1.1", "TLSv1.2", -# "TLSv1.3" (OpenSSL >= 1.1.1) or any combination. -# To enable only TLSv1.2 and TLSv1.3, use: -# -# tls-protocols "TLSv1.2 TLSv1.3" - -# Configure allowed ciphers. See the ciphers(1ssl) manpage for more information -# about the syntax of this string. -# -# Note: this configuration applies only to <= TLSv1.2. -# -# tls-ciphers DEFAULT:!MEDIUM - -# Configure allowed TLSv1.3 ciphersuites. See the ciphers(1ssl) manpage for more -# information about the syntax of this string, and specifically for TLSv1.3 -# ciphersuites. -# -# tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256 - -# When choosing a cipher, use the server's preference instead of the client -# preference. By default, the server follows the client's preference. -# -# tls-prefer-server-ciphers yes - -# By default, TLS session caching is enabled to allow faster and less expensive -# reconnections by clients that support it. Use the following directive to disable -# caching. -# -# tls-session-caching no - -# Change the default number of TLS sessions cached. A zero value sets the cache -# to unlimited size. The default size is 20480. -# -# tls-session-cache-size 5000 - -# Change the default timeout of cached TLS sessions. The default timeout is 300 -# seconds. -# -# tls-session-cache-timeout 60 - -################################# GENERAL ##################################### - -# By default Redis does not run as a daemon. Use 'yes' if you need it. 
-# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. -# When Redis is supervised by upstart or systemd, this parameter has no impact. -daemonize no - -# If you run Redis from upstart or systemd, Redis can interact with your -# supervision tree. Options: -# supervised no - no supervision interaction -# supervised upstart - signal upstart by putting Redis into SIGSTOP mode -# requires "expect stop" in your upstart job config -# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET -# on startup, and updating Redis status on a regular -# basis. -# supervised auto - detect upstart or systemd method based on -# UPSTART_JOB or NOTIFY_SOCKET environment variables -# Note: these supervision methods only signal "process is ready." -# They do not enable continuous pings back to your supervisor. -# -# The default is "no". To run under upstart/systemd, you can simply uncomment -# the line below: -# -# supervised auto - -# If a pid file is specified, Redis writes it where specified at startup -# and removes it at exit. -# -# When the server runs non daemonized, no pid file is created if none is -# specified in the configuration. When the server is daemonized, the pid file -# is used even if not specified, defaulting to "/var/run/redis.pid". -# -# Creating a pid file is best effort: if Redis is not able to create it -# nothing bad happens, the server will start and run normally. -# -# Note that on modern Linux systems "/run/redis.pid" is more conforming -# and should be used instead. -pidfile /var/run/redis_6379.pid - -# Specify the server verbosity level. -# This can be one of: -# debug (a lot of information, useful for development/testing) -# verbose (many rarely useful info, but not a mess like the debug level) -# notice (moderately verbose, what you want in production probably) -# warning (only very important / critical messages are logged) -loglevel warning - -# Specify the log file name. Also the empty string can be used to force -# Redis to log on the standard output. Note that if you use standard -# output for logging but daemonize, logs will be sent to /dev/null -logfile "" - -# To enable logging to the system logger, just set 'syslog-enabled' to yes, -# and optionally update the other syslog parameters to suit your needs. -# syslog-enabled no - -# Specify the syslog identity. -# syslog-ident redis - -# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. -# syslog-facility local0 - -# To disable the built in crash log, which will possibly produce cleaner core -# dumps when they are needed, uncomment the following: -# -# crash-log-enabled no - -# To disable the fast memory check that's run as part of the crash log, which -# will possibly let redis terminate sooner, uncomment the following: -# -# crash-memcheck-enabled no - -# Set the number of databases. The default database is DB 0, you can select -# a different one on a per-connection basis using SELECT where -# dbid is a number between 0 and 'databases'-1 -databases 16 - -# By default Redis shows an ASCII art logo only when started to log to the -# standard output and if the standard output is a TTY and syslog logging is -# disabled. Basically this means that normally a logo is displayed only in -# interactive sessions. -# -# However it is possible to force the pre-4.0 behavior and always show a -# ASCII art logo in startup logs by setting the following option to yes. 
-always-show-logo no - -# By default, Redis modifies the process title (as seen in 'top' and 'ps') to -# provide some runtime information. It is possible to disable this and leave -# the process name as executed by setting the following to no. -set-proc-title yes - -# When changing the process title, Redis uses the following template to construct -# the modified title. -# -# Template variables are specified in curly brackets. The following variables are -# supported: -# -# {title} Name of process as executed if parent, or type of child process. -# {listen-addr} Bind address or '*' followed by TCP or TLS port listening on, or -# Unix socket if only that's available. -# {server-mode} Special mode, i.e. "[sentinel]" or "[cluster]". -# {port} TCP port listening on, or 0. -# {tls-port} TLS port listening on, or 0. -# {unixsocket} Unix domain socket listening on, or "". -# {config-file} Name of configuration file used. -# -proc-title-template "{title} {listen-addr} {server-mode}" - -################################ SNAPSHOTTING ################################ - -# Save the DB to disk. -# -# save [ ...] -# -# Redis will save the DB if the given number of seconds elapsed and it -# surpassed the given number of write operations against the DB. -# -# Snapshotting can be completely disabled with a single empty string argument -# as in following example: -# -# save "" -# -# Unless specified otherwise, by default Redis will save the DB: -# * After 3600 seconds (an hour) if at least 1 change was performed -# * After 300 seconds (5 minutes) if at least 100 changes were performed -# * After 60 seconds if at least 10000 changes were performed -# -# You can set these explicitly by uncommenting the following line. -# -# save 3600 1 300 100 60 10000 - -save 300 1 - -# By default Redis will stop accepting writes if RDB snapshots are enabled -# (at least one save point) and the latest background save failed. -# This will make the user aware (in a hard way) that data is not persisting -# on disk properly, otherwise chances are that no one will notice and some -# disaster will happen. -# -# If the background saving process will start working again Redis will -# automatically allow writes again. -# -# However if you have setup your proper monitoring of the Redis server -# and persistence, you may want to disable this feature so that Redis will -# continue to work as usual even if there are problems with disk, -# permissions, and so forth. -stop-writes-on-bgsave-error yes - -# Compress string objects using LZF when dump .rdb databases? -# By default compression is enabled as it's almost always a win. -# If you want to save some CPU in the saving child set it to 'no' but -# the dataset will likely be bigger if you have compressible values or keys. -rdbcompression yes - -# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. -# This makes the format more resistant to corruption but there is a performance -# hit to pay (around 10%) when saving and loading RDB files, so you can disable it -# for maximum performances. -# -# RDB files created with checksum disabled have a checksum of zero that will -# tell the loading code to skip the check. -rdbchecksum yes - -# Enables or disables full sanitization checks for ziplist and listpack etc when -# loading an RDB or RESTORE payload. This reduces the chances of a assertion or -# crash later on while processing commands. 
-# Options: -# no - Never perform full sanitization -# yes - Always perform full sanitization -# clients - Perform full sanitization only for user connections. -# Excludes: RDB files, RESTORE commands received from the master -# connection, and client connections which have the -# skip-sanitize-payload ACL flag. -# The default should be 'clients' but since it currently affects cluster -# resharding via MIGRATE, it is temporarily set to 'no' by default. -# -# sanitize-dump-payload no - -# The filename where to dump the DB -dbfilename dump.rdb - -# Remove RDB files used by replication in instances without persistence -# enabled. By default this option is disabled, however there are environments -# where for regulations or other security concerns, RDB files persisted on -# disk by masters in order to feed replicas, or stored on disk by replicas -# in order to load them for the initial synchronization, should be deleted -# ASAP. Note that this option ONLY WORKS in instances that have both AOF -# and RDB persistence disabled, otherwise is completely ignored. -# -# An alternative (and sometimes better) way to obtain the same effect is -# to use diskless replication on both master and replicas instances. However -# in the case of replicas, diskless is not always an option. -rdb-del-sync-files no - -# The working directory. -# -# The DB will be written inside this directory, with the filename specified -# above using the 'dbfilename' configuration directive. -# -# The Append Only File will also be created inside this directory. -# -# Note that you must specify a directory here, not a file name. -dir ./ - -################################# REPLICATION ################################# - -# Master-Replica replication. Use replicaof to make a Redis instance a copy of -# another Redis server. A few things to understand ASAP about Redis replication. -# -# +------------------+ +---------------+ -# | Master | ---> | Replica | -# | (receive writes) | | (exact copy) | -# +------------------+ +---------------+ -# -# 1) Redis replication is asynchronous, but you can configure a master to -# stop accepting writes if it appears to be not connected with at least -# a given number of replicas. -# 2) Redis replicas are able to perform a partial resynchronization with the -# master if the replication link is lost for a relatively small amount of -# time. You may want to configure the replication backlog size (see the next -# sections of this file) with a sensible value depending on your needs. -# 3) Replication is automatic and does not need user intervention. After a -# network partition replicas automatically try to reconnect to masters -# and resynchronize with them. -# -# replicaof - -# If the master is password protected (using the "requirepass" configuration -# directive below) it is possible to tell the replica to authenticate before -# starting the replication synchronization process, otherwise the master will -# refuse the replica request. -# -# masterauth -# -# However this is not enough if you are using Redis ACLs (for Redis version -# 6 or greater), and the default user is not capable of running the PSYNC -# command and/or other commands needed for replication. In this case it's -# better to configure a special user to use with replication, and specify the -# masteruser configuration as such: -# -# masteruser -# -# When masteruser is specified, the replica will authenticate against its -# master using the new AUTH form: AUTH . 
- -# When a replica loses its connection with the master, or when the replication -# is still in progress, the replica can act in two different ways: -# -# 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will -# still reply to client requests, possibly with out of date data, or the -# data set may just be empty if this is the first synchronization. -# -# 2) If replica-serve-stale-data is set to 'no' the replica will reply with error -# "MASTERDOWN Link with MASTER is down and replica-serve-stale-data is set to 'no'" -# to all data access commands, excluding commands such as: -# INFO, REPLICAOF, AUTH, SHUTDOWN, REPLCONF, ROLE, CONFIG, SUBSCRIBE, -# UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, COMMAND, POST, -# HOST and LATENCY. -# -replica-serve-stale-data yes - -# You can configure a replica instance to accept writes or not. Writing against -# a replica instance may be useful to store some ephemeral data (because data -# written on a replica will be easily deleted after resync with the master) but -# may also cause problems if clients are writing to it because of a -# misconfiguration. -# -# Since Redis 2.6 by default replicas are read-only. -# -# Note: read only replicas are not designed to be exposed to untrusted clients -# on the internet. It's just a protection layer against misuse of the instance. -# Still a read only replica exports by default all the administrative commands -# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve -# security of read only replicas using 'rename-command' to shadow all the -# administrative / dangerous commands. -replica-read-only yes - -# Replication SYNC strategy: disk or socket. -# -# New replicas and reconnecting replicas that are not able to continue the -# replication process just receiving differences, need to do what is called a -# "full synchronization". An RDB file is transmitted from the master to the -# replicas. -# -# The transmission can happen in two different ways: -# -# 1) Disk-backed: The Redis master creates a new process that writes the RDB -# file on disk. Later the file is transferred by the parent -# process to the replicas incrementally. -# 2) Diskless: The Redis master creates a new process that directly writes the -# RDB file to replica sockets, without touching the disk at all. -# -# With disk-backed replication, while the RDB file is generated, more replicas -# can be queued and served with the RDB file as soon as the current child -# producing the RDB file finishes its work. With diskless replication instead -# once the transfer starts, new replicas arriving will be queued and a new -# transfer will start when the current one terminates. -# -# When diskless replication is used, the master waits a configurable amount of -# time (in seconds) before starting the transfer in the hope that multiple -# replicas will arrive and the transfer can be parallelized. -# -# With slow disks and fast (large bandwidth) networks, diskless replication -# works better. -repl-diskless-sync yes - -# When diskless replication is enabled, it is possible to configure the delay -# the server waits in order to spawn the child that transfers the RDB via socket -# to the replicas. -# -# This is important since once the transfer starts, it is not possible to serve -# new replicas arriving, that will be queued for the next RDB transfer, so the -# server waits a delay in order to let more replicas arrive. -# -# The delay is specified in seconds, and by default is 5 seconds. 
To disable -# it entirely just set it to 0 seconds and the transfer will start ASAP. -repl-diskless-sync-delay 5 - -# When diskless replication is enabled with a delay, it is possible to let -# the replication start before the maximum delay is reached if the maximum -# number of replicas expected have connected. Default of 0 means that the -# maximum is not defined and Redis will wait the full delay. -repl-diskless-sync-max-replicas 0 - -# ----------------------------------------------------------------------------- -# WARNING: RDB diskless load is experimental. Since in this setup the replica -# does not immediately store an RDB on disk, it may cause data loss during -# failovers. RDB diskless load + Redis modules not handling I/O reads may also -# cause Redis to abort in case of I/O errors during the initial synchronization -# stage with the master. Use only if you know what you are doing. -# ----------------------------------------------------------------------------- -# -# Replica can load the RDB it reads from the replication link directly from the -# socket, or store the RDB to a file and read that file after it was completely -# received from the master. -# -# In many cases the disk is slower than the network, and storing and loading -# the RDB file may increase replication time (and even increase the master's -# Copy on Write memory and replica buffers). -# However, parsing the RDB file directly from the socket may mean that we have -# to flush the contents of the current database before the full rdb was -# received. For this reason we have the following options: -# -# "disabled" - Don't use diskless load (store the rdb file to the disk first) -# "on-empty-db" - Use diskless load only when it is completely safe. -# "swapdb" - Keep current db contents in RAM while parsing the data directly -# from the socket. Replicas in this mode can keep serving current -# data set while replication is in progress, except for cases where -# they can't recognize master as having a data set from same -# replication history. -# Note that this requires sufficient memory, if you don't have it, -# you risk an OOM kill. -repl-diskless-load disabled - -# Master send PINGs to its replicas in a predefined interval. It's possible to -# change this interval with the repl_ping_replica_period option. The default -# value is 10 seconds. -# -# repl-ping-replica-period 10 - -# The following option sets the replication timeout for: -# -# 1) Bulk transfer I/O during SYNC, from the point of view of replica. -# 2) Master timeout from the point of view of replicas (data, pings). -# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings). -# -# It is important to make sure that this value is greater than the value -# specified for repl-ping-replica-period otherwise a timeout will be detected -# every time there is low traffic between the master and the replica. The default -# value is 60 seconds. -# -# repl-timeout 60 - -# Disable TCP_NODELAY on the replica socket after SYNC? -# -# If you select "yes" Redis will use a smaller number of TCP packets and -# less bandwidth to send data to replicas. But this can add a delay for -# the data to appear on the replica side, up to 40 milliseconds with -# Linux kernels using a default configuration. -# -# If you select "no" the delay for data to appear on the replica side will -# be reduced but more bandwidth will be used for replication. 
-# -# By default we optimize for low latency, but in very high traffic conditions -# or when the master and replicas are many hops away, turning this to "yes" may -# be a good idea. -repl-disable-tcp-nodelay no - -# Set the replication backlog size. The backlog is a buffer that accumulates -# replica data when replicas are disconnected for some time, so that when a -# replica wants to reconnect again, often a full resync is not needed, but a -# partial resync is enough, just passing the portion of data the replica -# missed while disconnected. -# -# The bigger the replication backlog, the longer the replica can endure the -# disconnect and later be able to perform a partial resynchronization. -# -# The backlog is only allocated if there is at least one replica connected. -# -# repl-backlog-size 1mb - -# After a master has no connected replicas for some time, the backlog will be -# freed. The following option configures the amount of seconds that need to -# elapse, starting from the time the last replica disconnected, for the backlog -# buffer to be freed. -# -# Note that replicas never free the backlog for timeout, since they may be -# promoted to masters later, and should be able to correctly "partially -# resynchronize" with other replicas: hence they should always accumulate backlog. -# -# A value of 0 means to never release the backlog. -# -# repl-backlog-ttl 3600 - -# The replica priority is an integer number published by Redis in the INFO -# output. It is used by Redis Sentinel in order to select a replica to promote -# into a master if the master is no longer working correctly. -# -# A replica with a low priority number is considered better for promotion, so -# for instance if there are three replicas with priority 10, 100, 25 Sentinel -# will pick the one with priority 10, that is the lowest. -# -# However a special priority of 0 marks the replica as not able to perform the -# role of master, so a replica with priority of 0 will never be selected by -# Redis Sentinel for promotion. -# -# By default the priority is 100. -replica-priority 100 - -# The propagation error behavior controls how Redis will behave when it is -# unable to handle a command being processed in the replication stream from a master -# or processed while reading from an AOF file. Errors that occur during propagation -# are unexpected, and can cause data inconsistency. However, there are edge cases -# in earlier versions of Redis where it was possible for the server to replicate or persist -# commands that would fail on future versions. For this reason the default behavior -# is to ignore such errors and continue processing commands. -# -# If an application wants to ensure there is no data divergence, this configuration -# should be set to 'panic' instead. The value can also be set to 'panic-on-replicas' -# to only panic when a replica encounters an error on the replication stream. One of -# these two panic values will become the default value in the future once there are -# sufficient safety mechanisms in place to prevent false positive crashes. -# -# propagation-error-behavior ignore - -# Replica ignore disk write errors controls the behavior of a replica when it is -# unable to persist a write command received from its master to disk. By default, -# this configuration is set to 'no' and will crash the replica in this condition. 
-# It is not recommended to change this default, however in order to be compatible -# with older versions of Redis this config can be toggled to 'yes' which will just -# log a warning and execute the write command it got from the master. -# -# replica-ignore-disk-write-errors no - -# ----------------------------------------------------------------------------- -# By default, Redis Sentinel includes all replicas in its reports. A replica -# can be excluded from Redis Sentinel's announcements. An unannounced replica -# will be ignored by the 'sentinel replicas ' command and won't be -# exposed to Redis Sentinel's clients. -# -# This option does not change the behavior of replica-priority. Even with -# replica-announced set to 'no', the replica can be promoted to master. To -# prevent this behavior, set replica-priority to 0. -# -# replica-announced yes - -# It is possible for a master to stop accepting writes if there are less than -# N replicas connected, having a lag less or equal than M seconds. -# -# The N replicas need to be in "online" state. -# -# The lag in seconds, that must be <= the specified value, is calculated from -# the last ping received from the replica, that is usually sent every second. -# -# This option does not GUARANTEE that N replicas will accept the write, but -# will limit the window of exposure for lost writes in case not enough replicas -# are available, to the specified number of seconds. -# -# For example to require at least 3 replicas with a lag <= 10 seconds use: -# -# min-replicas-to-write 3 -# min-replicas-max-lag 10 -# -# Setting one or the other to 0 disables the feature. -# -# By default min-replicas-to-write is set to 0 (feature disabled) and -# min-replicas-max-lag is set to 10. - -# A Redis master is able to list the address and port of the attached -# replicas in different ways. For example the "INFO replication" section -# offers this information, which is used, among other tools, by -# Redis Sentinel in order to discover replica instances. -# Another place where this info is available is in the output of the -# "ROLE" command of a master. -# -# The listed IP address and port normally reported by a replica is -# obtained in the following way: -# -# IP: The address is auto detected by checking the peer address -# of the socket used by the replica to connect with the master. -# -# Port: The port is communicated by the replica during the replication -# handshake, and is normally the port that the replica is using to -# listen for connections. -# -# However when port forwarding or Network Address Translation (NAT) is -# used, the replica may actually be reachable via different IP and port -# pairs. The following two options can be used by a replica in order to -# report to its master a specific set of IP and port, so that both INFO -# and ROLE will report those values. -# -# There is no need to use both the options if you need to override just -# the port or the IP address. -# -# replica-announce-ip 5.5.5.5 -# replica-announce-port 1234 - -############################### KEYS TRACKING ################################# - -# Redis implements server assisted support for client side caching of values. -# This is implemented using an invalidation table that remembers, using -# a radix key indexed by key name, what clients have which keys. In turn -# this is used in order to send invalidation messages to clients. 
Please -# check this page to understand more about the feature: -# -# https://redis.io/topics/client-side-caching -# -# When tracking is enabled for a client, all the read only queries are assumed -# to be cached: this will force Redis to store information in the invalidation -# table. When keys are modified, such information is flushed away, and -# invalidation messages are sent to the clients. However if the workload is -# heavily dominated by reads, Redis could use more and more memory in order -# to track the keys fetched by many clients. -# -# For this reason it is possible to configure a maximum fill value for the -# invalidation table. By default it is set to 1M of keys, and once this limit -# is reached, Redis will start to evict keys in the invalidation table -# even if they were not modified, just to reclaim memory: this will in turn -# force the clients to invalidate the cached values. Basically the table -# maximum size is a trade off between the memory you want to spend server -# side to track information about who cached what, and the ability of clients -# to retain cached objects in memory. -# -# If you set the value to 0, it means there are no limits, and Redis will -# retain as many keys as needed in the invalidation table. -# In the "stats" INFO section, you can find information about the number of -# keys in the invalidation table at every given moment. -# -# Note: when key tracking is used in broadcasting mode, no memory is used -# in the server side so this setting is useless. -# -# tracking-table-max-keys 1000000 - -################################## SECURITY ################################### - -# Warning: since Redis is pretty fast, an outside user can try up to -# 1 million passwords per second against a modern box. This means that you -# should use very strong passwords, otherwise they will be very easy to break. -# Note that because the password is really a shared secret between the client -# and the server, and should not be memorized by any human, the password -# can be easily a long string from /dev/urandom or whatever, so by using a -# long and unguessable password no brute force attack will be possible. - -# Redis ACL users are defined in the following format: -# -# user ... acl rules ... -# -# For example: -# -# user worker +@list +@connection ~jobs:* on >ffa9203c493aa99 -# -# The special username "default" is used for new connections. If this user -# has the "nopass" rule, then new connections will be immediately authenticated -# as the "default" user without the need of any password provided via the -# AUTH command. Otherwise if the "default" user is not flagged with "nopass" -# the connections will start in not authenticated state, and will require -# AUTH (or the HELLO command AUTH option) in order to be authenticated and -# start to work. -# -# The ACL rules that describe what a user can do are the following: -# -# on Enable the user: it is possible to authenticate as this user. -# off Disable the user: it's no longer possible to authenticate -# with this user, however the already authenticated connections -# will still work. -# skip-sanitize-payload RESTORE dump-payload sanitization is skipped. -# sanitize-payload RESTORE dump-payload is sanitized (default). -# + Allow the execution of that command. -# May be used with `|` for allowing subcommands (e.g "+config|get") -# - Disallow the execution of that command. 
-# May be used with `|` for blocking subcommands (e.g "-config|set") -# +@ Allow the execution of all the commands in such category -# with valid categories are like @admin, @set, @sortedset, ... -# and so forth, see the full list in the server.c file where -# the Redis command table is described and defined. -# The special category @all means all the commands, but currently -# present in the server, and that will be loaded in the future -# via modules. -# +|first-arg Allow a specific first argument of an otherwise -# disabled command. It is only supported on commands with -# no sub-commands, and is not allowed as negative form -# like -SELECT|1, only additive starting with "+". This -# feature is deprecated and may be removed in the future. -# allcommands Alias for +@all. Note that it implies the ability to execute -# all the future commands loaded via the modules system. -# nocommands Alias for -@all. -# ~ Add a pattern of keys that can be mentioned as part of -# commands. For instance ~* allows all the keys. The pattern -# is a glob-style pattern like the one of KEYS. -# It is possible to specify multiple patterns. -# %R~ Add key read pattern that specifies which keys can be read -# from. -# %W~ Add key write pattern that specifies which keys can be -# written to. -# allkeys Alias for ~* -# resetkeys Flush the list of allowed keys patterns. -# & Add a glob-style pattern of Pub/Sub channels that can be -# accessed by the user. It is possible to specify multiple channel -# patterns. -# allchannels Alias for &* -# resetchannels Flush the list of allowed channel patterns. -# > Add this password to the list of valid password for the user. -# For example >mypass will add "mypass" to the list. -# This directive clears the "nopass" flag (see later). -# < Remove this password from the list of valid passwords. -# nopass All the set passwords of the user are removed, and the user -# is flagged as requiring no password: it means that every -# password will work against this user. If this directive is -# used for the default user, every new connection will be -# immediately authenticated with the default user without -# any explicit AUTH command required. Note that the "resetpass" -# directive will clear this condition. -# resetpass Flush the list of allowed passwords. Moreover removes the -# "nopass" status. After "resetpass" the user has no associated -# passwords and there is no way to authenticate without adding -# some password (or setting it as "nopass" later). -# reset Performs the following actions: resetpass, resetkeys, off, -# -@all. The user returns to the same state it has immediately -# after its creation. -# () Create a new selector with the options specified within the -# parentheses and attach it to the user. Each option should be -# space separated. The first character must be ( and the last -# character must be ). -# clearselectors Remove all of the currently attached selectors. -# Note this does not change the "root" user permissions, -# which are the permissions directly applied onto the -# user (outside the parentheses). -# -# ACL rules can be specified in any order: for instance you can start with -# passwords, then flags, or key patterns. However note that the additive -# and subtractive rules will CHANGE MEANING depending on the ordering. 
-# For instance see the following example: -# -# user alice on +@all -DEBUG ~* >somepassword -# -# This will allow "alice" to use all the commands with the exception of the -# DEBUG command, since +@all added all the commands to the set of the commands -# alice can use, and later DEBUG was removed. However if we invert the order -# of two ACL rules the result will be different: -# -# user alice on -DEBUG +@all ~* >somepassword -# -# Now DEBUG was removed when alice had yet no commands in the set of allowed -# commands, later all the commands are added, so the user will be able to -# execute everything. -# -# Basically ACL rules are processed left-to-right. -# -# The following is a list of command categories and their meanings: -# * keyspace - Writing or reading from keys, databases, or their metadata -# in a type agnostic way. Includes DEL, RESTORE, DUMP, RENAME, EXISTS, DBSIZE, -# KEYS, EXPIRE, TTL, FLUSHALL, etc. Commands that may modify the keyspace, -# key or metadata will also have `write` category. Commands that only read -# the keyspace, key or metadata will have the `read` category. -# * read - Reading from keys (values or metadata). Note that commands that don't -# interact with keys, will not have either `read` or `write`. -# * write - Writing to keys (values or metadata) -# * admin - Administrative commands. Normal applications will never need to use -# these. Includes REPLICAOF, CONFIG, DEBUG, SAVE, MONITOR, ACL, SHUTDOWN, etc. -# * dangerous - Potentially dangerous (each should be considered with care for -# various reasons). This includes FLUSHALL, MIGRATE, RESTORE, SORT, KEYS, -# CLIENT, DEBUG, INFO, CONFIG, SAVE, REPLICAOF, etc. -# * connection - Commands affecting the connection or other connections. -# This includes AUTH, SELECT, COMMAND, CLIENT, ECHO, PING, etc. -# * blocking - Potentially blocking the connection until released by another -# command. -# * fast - Fast O(1) commands. May loop on the number of arguments, but not the -# number of elements in the key. -# * slow - All commands that are not Fast. -# * pubsub - PUBLISH / SUBSCRIBE related -# * transaction - WATCH / MULTI / EXEC related commands. -# * scripting - Scripting related. -# * set - Data type: sets related. -# * sortedset - Data type: zsets related. -# * list - Data type: lists related. -# * hash - Data type: hashes related. -# * string - Data type: strings related. -# * bitmap - Data type: bitmaps related. -# * hyperloglog - Data type: hyperloglog related. -# * geo - Data type: geo related. -# * stream - Data type: streams related. -# -# For more information about ACL configuration please refer to -# the Redis web site at https://redis.io/topics/acl - -# ACL LOG -# -# The ACL Log tracks failed commands and authentication events associated -# with ACLs. The ACL Log is useful to troubleshoot failed commands blocked -# by ACLs. The ACL Log is stored in memory. You can reclaim memory with -# ACL LOG RESET. Define the maximum entry length of the ACL Log below. -acllog-max-len 128 - -# Using an external ACL file -# -# Instead of configuring users here in this file, it is possible to use -# a stand-alone file just listing users. The two methods cannot be mixed: -# if you configure users here and at the same time you activate the external -# ACL file, the server will refuse to start. -# -# The format of the external ACL user file is exactly the same as the -# format that is used inside redis.conf to describe users. 
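The "worker" rule from the example above can also be applied at runtime with ACL SETUSER instead of a config or ACL file; the aclfile directive shown next accepts exactly the same user lines. A minimal sketch, assuming a local Redis 6+ and the third-party redis-py client (neither is part of this diff):

```python
# Apply the "worker" rule from the example above at runtime via ACL SETUSER.
# Assumes a local Redis 6+ and the redis-py client; the >ffa9203c493aa99
# password comes straight from the example above and is not a real credential.
import redis

r = redis.Redis(host="localhost", port=6379)

r.execute_command(
    "ACL", "SETUSER", "worker",
    "on", ">ffa9203c493aa99", "~jobs:*", "+@list", "+@connection",
)

# Verify how Redis interpreted the rule (equivalent to ACL GETUSER worker).
print(r.execute_command("ACL", "GETUSER", "worker"))
```

ACL GETUSER is a convenient way to confirm how a rule was parsed before committing it to an external ACL file such as the one configured below.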
-# -# aclfile /etc/redis/users.acl - -# IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatibility -# layer on top of the new ACL system. The option effect will be just setting -# the password for the default user. Clients will still authenticate using -# AUTH as usually, or more explicitly with AUTH default -# if they follow the new protocol: both will work. -# -# The requirepass is not compatible with aclfile option and the ACL LOAD -# command, these will cause requirepass to be ignored. -# -# requirepass foobared - -# New users are initialized with restrictive permissions by default, via the -# equivalent of this ACL rule 'off resetkeys -@all'. Starting with Redis 6.2, it -# is possible to manage access to Pub/Sub channels with ACL rules as well. The -# default Pub/Sub channels permission if new users is controlled by the -# acl-pubsub-default configuration directive, which accepts one of these values: -# -# allchannels: grants access to all Pub/Sub channels -# resetchannels: revokes access to all Pub/Sub channels -# -# From Redis 7.0, acl-pubsub-default defaults to 'resetchannels' permission. -# -# acl-pubsub-default resetchannels - -# Command renaming (DEPRECATED). -# -# ------------------------------------------------------------------------ -# WARNING: avoid using this option if possible. Instead use ACLs to remove -# commands from the default user, and put them only in some admin user you -# create for administrative purposes. -# ------------------------------------------------------------------------ -# -# It is possible to change the name of dangerous commands in a shared -# environment. For instance the CONFIG command may be renamed into something -# hard to guess so that it will still be available for internal-use tools -# but not available for general clients. -# -# Example: -# -# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 -# -# It is also possible to completely kill a command by renaming it into -# an empty string: -# -# rename-command CONFIG "" -# -# Please note that changing the name of commands that are logged into the -# AOF file or transmitted to replicas may cause problems. - -################################### CLIENTS #################################### - -# Set the max number of connected clients at the same time. By default -# this limit is set to 10000 clients, however if the Redis server is not -# able to configure the process file limit to allow for the specified limit -# the max number of allowed clients is set to the current file limit -# minus 32 (as Redis reserves a few file descriptors for internal uses). -# -# Once the limit is reached Redis will close all the new connections sending -# an error 'max number of clients reached'. -# -# IMPORTANT: When Redis Cluster is used, the max number of connections is also -# shared with the cluster bus: every node in the cluster will use two -# connections, one incoming and another outgoing. It is important to size the -# limit accordingly in case of very large clusters. -# -# maxclients 10000 - -############################## MEMORY MANAGEMENT ################################ - -# Set a memory usage limit to the specified amount of bytes. -# When the memory limit is reached Redis will try to remove keys -# according to the eviction policy selected (see maxmemory-policy). 
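The maxmemory discussion, including the list of available eviction policies, continues right after this aside. As a quick illustration of the directives introduced above, both the limit and the policy can also be changed at runtime; a sketch assuming a local Redis and the redis-py client (not part of this diff):

```python
# Runtime equivalent of setting "maxmemory" and "maxmemory-policy" in this file.
# The 100mb value and allkeys-lru policy are illustrative only.
# Assumes a local Redis and the redis-py client.
import redis

r = redis.Redis()

r.config_set("maxmemory", "100mb")
r.config_set("maxmemory-policy", "allkeys-lru")  # the policy list is below

# Compare current usage against the configured limit.
print(r.info("memory")["used_memory_human"], r.config_get("maxmemory"))
```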
-# -# If Redis can't remove keys according to the policy, or if the policy is -# set to 'noeviction', Redis will start to reply with errors to commands -# that would use more memory, like SET, LPUSH, and so on, and will continue -# to reply to read-only commands like GET. -# -# This option is usually useful when using Redis as an LRU or LFU cache, or to -# set a hard memory limit for an instance (using the 'noeviction' policy). -# -# WARNING: If you have replicas attached to an instance with maxmemory on, -# the size of the output buffers needed to feed the replicas are subtracted -# from the used memory count, so that network problems / resyncs will -# not trigger a loop where keys are evicted, and in turn the output -# buffer of replicas is full with DELs of keys evicted triggering the deletion -# of more keys, and so forth until the database is completely emptied. -# -# In short... if you have replicas attached it is suggested that you set a lower -# limit for maxmemory so that there is some free RAM on the system for replica -# output buffers (but this is not needed if the policy is 'noeviction'). -# -# maxmemory - -# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory -# is reached. You can select one from the following behaviors: -# -# volatile-lru -> Evict using approximated LRU, only keys with an expire set. -# allkeys-lru -> Evict any key using approximated LRU. -# volatile-lfu -> Evict using approximated LFU, only keys with an expire set. -# allkeys-lfu -> Evict any key using approximated LFU. -# volatile-random -> Remove a random key having an expire set. -# allkeys-random -> Remove a random key, any key. -# volatile-ttl -> Remove the key with the nearest expire time (minor TTL) -# noeviction -> Don't evict anything, just return an error on write operations. -# -# LRU means Least Recently Used -# LFU means Least Frequently Used -# -# Both LRU, LFU and volatile-ttl are implemented using approximated -# randomized algorithms. -# -# Note: with any of the above policies, when there are no suitable keys for -# eviction, Redis will return an error on write operations that require -# more memory. These are usually commands that create new keys, add data or -# modify existing keys. A few examples are: SET, INCR, HSET, LPUSH, SUNIONSTORE, -# SORT (due to the STORE argument), and EXEC (if the transaction includes any -# command that requires memory). -# -# The default is: -# -# maxmemory-policy noeviction - -# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated -# algorithms (in order to save memory), so you can tune it for speed or -# accuracy. By default Redis will check five keys and pick the one that was -# used least recently, you can change the sample size using the following -# configuration directive. -# -# The default of 5 produces good enough results. 10 Approximates very closely -# true LRU but costs more CPU. 3 is faster but not very accurate. -# -# maxmemory-samples 5 - -# Eviction processing is designed to function well with the default setting. -# If there is an unusually large amount of write traffic, this value may need to -# be increased. Decreasing this value may reduce latency at the risk of -# eviction processing effectiveness -# 0 = minimum latency, 10 = default, 100 = process without regard to latency -# -# maxmemory-eviction-tenacity 10 - -# Starting from Redis 5, by default a replica will ignore its maxmemory setting -# (unless it is promoted to master after a failover or manually). 
It means -# that the eviction of keys will be just handled by the master, sending the -# DEL commands to the replica as keys evict in the master side. -# -# This behavior ensures that masters and replicas stay consistent, and is usually -# what you want, however if your replica is writable, or you want the replica -# to have a different memory setting, and you are sure all the writes performed -# to the replica are idempotent, then you may change this default (but be sure -# to understand what you are doing). -# -# Note that since the replica by default does not evict, it may end using more -# memory than the one set via maxmemory (there are certain buffers that may -# be larger on the replica, or data structures may sometimes take more memory -# and so forth). So make sure you monitor your replicas and make sure they -# have enough memory to never hit a real out-of-memory condition before the -# master hits the configured maxmemory setting. -# -# replica-ignore-maxmemory yes - -# Redis reclaims expired keys in two ways: upon access when those keys are -# found to be expired, and also in background, in what is called the -# "active expire key". The key space is slowly and interactively scanned -# looking for expired keys to reclaim, so that it is possible to free memory -# of keys that are expired and will never be accessed again in a short time. -# -# The default effort of the expire cycle will try to avoid having more than -# ten percent of expired keys still in memory, and will try to avoid consuming -# more than 25% of total memory and to add latency to the system. However -# it is possible to increase the expire "effort" that is normally set to -# "1", to a greater value, up to the value "10". At its maximum value the -# system will use more CPU, longer cycles (and technically may introduce -# more latency), and will tolerate less already expired keys still present -# in the system. It's a tradeoff between memory, CPU and latency. -# -# active-expire-effort 1 - -############################# LAZY FREEING #################################### - -# Redis has two primitives to delete keys. One is called DEL and is a blocking -# deletion of the object. It means that the server stops processing new commands -# in order to reclaim all the memory associated with an object in a synchronous -# way. If the key deleted is associated with a small object, the time needed -# in order to execute the DEL command is very small and comparable to most other -# O(1) or O(log_N) commands in Redis. However if the key is associated with an -# aggregated value containing millions of elements, the server can block for -# a long time (even seconds) in order to complete the operation. -# -# For the above reasons Redis also offers non blocking deletion primitives -# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and -# FLUSHDB commands, in order to reclaim memory in background. Those commands -# are executed in constant time. Another thread will incrementally free the -# object in the background as fast as possible. -# -# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled. -# It's up to the design of the application to understand when it is a good -# idea to use one or the other. However the Redis server sometimes has to -# delete keys or flush the whole database as a side effect of other operations. 
-# Specifically Redis deletes objects independently of a user call in the -# following scenarios: -# -# 1) On eviction, because of the maxmemory and maxmemory policy configurations, -# in order to make room for new data, without going over the specified -# memory limit. -# 2) Because of expire: when a key with an associated time to live (see the -# EXPIRE command) must be deleted from memory. -# 3) Because of a side effect of a command that stores data on a key that may -# already exist. For example the RENAME command may delete the old key -# content when it is replaced with another one. Similarly SUNIONSTORE -# or SORT with STORE option may delete existing keys. The SET command -# itself removes any old content of the specified key in order to replace -# it with the specified string. -# 4) During replication, when a replica performs a full resynchronization with -# its master, the content of the whole database is removed in order to -# load the RDB file just transferred. -# -# In all the above cases the default is to delete objects in a blocking way, -# like if DEL was called. However you can configure each case specifically -# in order to instead release memory in a non-blocking way like if UNLINK -# was called, using the following configuration directives. - -lazyfree-lazy-eviction no -lazyfree-lazy-expire no -lazyfree-lazy-server-del no -replica-lazy-flush no - -# It is also possible, for the case when to replace the user code DEL calls -# with UNLINK calls is not easy, to modify the default behavior of the DEL -# command to act exactly like UNLINK, using the following configuration -# directive: - -lazyfree-lazy-user-del no - -# FLUSHDB, FLUSHALL, SCRIPT FLUSH and FUNCTION FLUSH support both asynchronous and synchronous -# deletion, which can be controlled by passing the [SYNC|ASYNC] flags into the -# commands. When neither flag is passed, this directive will be used to determine -# if the data should be deleted asynchronously. - -lazyfree-lazy-user-flush no - -################################ THREADED I/O ################################# - -# Redis is mostly single threaded, however there are certain threaded -# operations such as UNLINK, slow I/O accesses and other things that are -# performed on side threads. -# -# Now it is also possible to handle Redis clients socket reads and writes -# in different I/O threads. Since especially writing is so slow, normally -# Redis users use pipelining in order to speed up the Redis performances per -# core, and spawn multiple instances in order to scale more. Using I/O -# threads it is possible to easily speedup two times Redis without resorting -# to pipelining nor sharding of the instance. -# -# By default threading is disabled, we suggest enabling it only in machines -# that have at least 4 or more cores, leaving at least one spare core. -# Using more than 8 threads is unlikely to help much. We also recommend using -# threaded I/O only if you actually have performance problems, with Redis -# instances being able to use a quite big percentage of CPU time, otherwise -# there is no point in using this feature. -# -# So for instance if you have a four cores boxes, try to use 2 or 3 I/O -# threads, if you have a 8 cores, try to use 6 threads. In order to -# enable I/O threads use the following configuration directive: -# -# io-threads 4 -# -# Setting io-threads to 1 will just use the main thread as usual. 
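Stepping back to the LAZY FREEING primitives described above, the practical difference between DEL and UNLINK is easiest to see from a client; the threaded I/O discussion resumes after this sketch. Assumes a local Redis and the redis-py client (not part of this diff):

```python
# DEL frees the value synchronously; UNLINK detaches the key and lets a
# background thread reclaim the memory, which matters for huge aggregates.
# Assumes a local Redis and the redis-py client.
import redis

r = redis.Redis()

# Build a deliberately large list so the difference is measurable.
r.delete("biglist")
for _ in range(100):
    r.rpush("biglist", *range(10_000))

r.unlink("biglist")   # returns quickly; freeing happens off the main thread
r.set("small", "x")
r.delete("small")     # blocking DEL is perfectly fine for small values
```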
-# When I/O threads are enabled, we only use threads for writes, that is -# to thread the write(2) syscall and transfer the client buffers to the -# socket. However it is also possible to enable threading of reads and -# protocol parsing using the following configuration directive, by setting -# it to yes: -# -# io-threads-do-reads no -# -# Usually threading reads doesn't help much. -# -# NOTE 1: This configuration directive cannot be changed at runtime via -# CONFIG SET. Also, this feature currently does not work when SSL is -# enabled. -# -# NOTE 2: If you want to test the Redis speedup using redis-benchmark, make -# sure you also run the benchmark itself in threaded mode, using the -# --threads option to match the number of Redis threads, otherwise you'll not -# be able to notice the improvements. - -############################ KERNEL OOM CONTROL ############################## - -# On Linux, it is possible to hint the kernel OOM killer on what processes -# should be killed first when out of memory. -# -# Enabling this feature makes Redis actively control the oom_score_adj value -# for all its processes, depending on their role. The default scores will -# attempt to have background child processes killed before all others, and -# replicas killed before masters. -# -# Redis supports these options: -# -# no: Don't make changes to oom-score-adj (default). -# yes: Alias to "relative" see below. -# absolute: Values in oom-score-adj-values are written as is to the kernel. -# relative: Values are used relative to the initial value of oom_score_adj when -# the server starts and are then clamped to a range of -1000 to 1000. -# Because typically the initial value is 0, they will often match the -# absolute values. -oom-score-adj no - -# When oom-score-adj is used, this directive controls the specific values used -# for master, replica and background child processes. Values range -2000 to -# 2000 (higher means more likely to be killed). -# -# Unprivileged processes (not root, and without CAP_SYS_RESOURCE capabilities) -# can freely increase their value, but not decrease it below its initial -# settings. This means that setting oom-score-adj to "relative" and setting the -# oom-score-adj-values to positive values will always succeed. -oom-score-adj-values 0 200 800 - - -#################### KERNEL transparent hugepage CONTROL ###################### - -# Usually the kernel Transparent Huge Pages control is set to "madvise" or -# or "never" by default (/sys/kernel/mm/transparent_hugepage/enabled), in which -# case this config has no effect. On systems in which it is set to "always", -# redis will attempt to disable it specifically for the redis process in order -# to avoid latency problems specifically with fork(2) and CoW. -# If for some reason you prefer to keep it enabled, you can set this config to -# "no" and the kernel global to "always". - -disable-thp yes - -############################## APPEND ONLY MODE ############################### - -# By default Redis asynchronously dumps the dataset on disk. This mode is -# good enough in many applications, but an issue with the Redis process or -# a power outage may result into a few minutes of writes lost (depending on -# the configured save points). -# -# The Append Only File is an alternative persistence mode that provides -# much better durability. 
For instance using the default data fsync policy -# (see later in the config file) Redis can lose just one second of writes in a -# dramatic event like a server power outage, or a single write if something -# wrong with the Redis process itself happens, but the operating system is -# still running correctly. -# -# AOF and RDB persistence can be enabled at the same time without problems. -# If the AOF is enabled on startup Redis will load the AOF, that is the file -# with the better durability guarantees. -# -# Please check https://redis.io/topics/persistence for more information. - -appendonly yes - -# The base name of the append only file. -# -# Redis 7 and newer use a set of append-only files to persist the dataset -# and changes applied to it. There are two basic types of files in use: -# -# - Base files, which are a snapshot representing the complete state of the -# dataset at the time the file was created. Base files can be either in -# the form of RDB (binary serialized) or AOF (textual commands). -# - Incremental files, which contain additional commands that were applied -# to the dataset following the previous file. -# -# In addition, manifest files are used to track the files and the order in -# which they were created and should be applied. -# -# Append-only file names are created by Redis following a specific pattern. -# The file name's prefix is based on the 'appendfilename' configuration -# parameter, followed by additional information about the sequence and type. -# -# For example, if appendfilename is set to appendonly.aof, the following file -# names could be derived: -# -# - appendonly.aof.1.base.rdb as a base file. -# - appendonly.aof.1.incr.aof, appendonly.aof.2.incr.aof as incremental files. -# - appendonly.aof.manifest as a manifest file. - -appendfilename "appendonly.aof" - -# For convenience, Redis stores all persistent append-only files in a dedicated -# directory. The name of the directory is determined by the appenddirname -# configuration parameter. - -appenddirname "appendonlydir" - -# The fsync() call tells the Operating System to actually write data on disk -# instead of waiting for more data in the output buffer. Some OS will really flush -# data on disk, some other OS will just try to do it ASAP. -# -# Redis supports three different modes: -# -# no: don't fsync, just let the OS flush the data when it wants. Faster. -# always: fsync after every write to the append only log. Slow, Safest. -# everysec: fsync only one time every second. Compromise. -# -# The default is "everysec", as that's usually the right compromise between -# speed and data safety. It's up to you to understand if you can relax this to -# "no" that will let the operating system flush the output buffer when -# it wants, for better performances (but if you can live with the idea of -# some data loss consider the default persistence mode that's snapshotting), -# or on the contrary, use "always" that's very slow but a bit safer than -# everysec. -# -# More details please check the following article: -# http://antirez.com/post/redis-persistence-demystified.html -# -# If unsure, use "everysec". - -# appendfsync always -appendfsync everysec -# appendfsync no - -# When the AOF fsync policy is set to always or everysec, and a background -# saving process (a background save or AOF log background rewriting) is -# performing a lot of I/O against the disk, in some Linux configurations -# Redis may block too long on the fsync() call. 
Note that there is no fix for -# this currently, as even performing fsync in a different thread will block -# our synchronous write(2) call. -# -# In order to mitigate this problem it's possible to use the following option -# that will prevent fsync() from being called in the main process while a -# BGSAVE or BGREWRITEAOF is in progress. -# -# This means that while another child is saving, the durability of Redis is -# the same as "appendfsync no". In practical terms, this means that it is -# possible to lose up to 30 seconds of log in the worst scenario (with the -# default Linux settings). -# -# If you have latency problems turn this to "yes". Otherwise leave it as -# "no" that is the safest pick from the point of view of durability. - -no-appendfsync-on-rewrite no - -# Automatic rewrite of the append only file. -# Redis is able to automatically rewrite the log file implicitly calling -# BGREWRITEAOF when the AOF log size grows by the specified percentage. -# -# This is how it works: Redis remembers the size of the AOF file after the -# latest rewrite (if no rewrite has happened since the restart, the size of -# the AOF at startup is used). -# -# This base size is compared to the current size. If the current size is -# bigger than the specified percentage, the rewrite is triggered. Also -# you need to specify a minimal size for the AOF file to be rewritten, this -# is useful to avoid rewriting the AOF file even if the percentage increase -# is reached but it is still pretty small. -# -# Specify a percentage of zero in order to disable the automatic AOF -# rewrite feature. - -auto-aof-rewrite-percentage 100 -auto-aof-rewrite-min-size 64mb - -# An AOF file may be found to be truncated at the end during the Redis -# startup process, when the AOF data gets loaded back into memory. -# This may happen when the system where Redis is running -# crashes, especially when an ext4 filesystem is mounted without the -# data=ordered option (however this can't happen when Redis itself -# crashes or aborts but the operating system still works correctly). -# -# Redis can either exit with an error when this happens, or load as much -# data as possible (the default now) and start if the AOF file is found -# to be truncated at the end. The following option controls this behavior. -# -# If aof-load-truncated is set to yes, a truncated AOF file is loaded and -# the Redis server starts emitting a log to inform the user of the event. -# Otherwise if the option is set to no, the server aborts with an error -# and refuses to start. When the option is set to no, the user requires -# to fix the AOF file using the "redis-check-aof" utility before to restart -# the server. -# -# Note that if the AOF file will be found to be corrupted in the middle -# the server will still exit with an error. This option only applies when -# Redis will try to read more data from the AOF file but not enough bytes -# will be found. -aof-load-truncated yes - -# Redis can create append-only base files in either RDB or AOF formats. Using -# the RDB format is always faster and more efficient, and disabling it is only -# supported for backward compatibility purposes. -aof-use-rdb-preamble yes - -# Redis supports recording timestamp annotations in the AOF to support restoring -# the data from a specific point-in-time. However, using this capability changes -# the AOF format in a way that may not be compatible with existing AOF parsers. 
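The aof-timestamp-enabled directive itself follows below. As an aside, the AOF behavior configured in this section can be observed from a client, and a rewrite can be started on demand; a sketch assuming a local Redis running with appendonly enabled and the redis-py client (not part of this diff):

```python
# Inspect AOF state and trigger a manual rewrite (BGREWRITEAOF).
# Assumes a local Redis running with "appendonly yes" and the redis-py client.
import redis

r = redis.Redis()

persistence = r.info("persistence")
print("AOF enabled:", persistence["aof_enabled"])
print("rewrite in progress:", persistence["aof_rewrite_in_progress"])

# Same effect as the automatic rewrite driven by auto-aof-rewrite-percentage,
# but started on demand.
r.bgrewriteaof()
```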
-aof-timestamp-enabled no - -################################ SHUTDOWN ##################################### - -# Maximum time to wait for replicas when shutting down, in seconds. -# -# During shut down, a grace period allows any lagging replicas to catch up with -# the latest replication offset before the master exists. This period can -# prevent data loss, especially for deployments without configured disk backups. -# -# The 'shutdown-timeout' value is the grace period's duration in seconds. It is -# only applicable when the instance has replicas. To disable the feature, set -# the value to 0. -# -# shutdown-timeout 10 - -# When Redis receives a SIGINT or SIGTERM, shutdown is initiated and by default -# an RDB snapshot is written to disk in a blocking operation if save points are configured. -# The options used on signaled shutdown can include the following values: -# default: Saves RDB snapshot only if save points are configured. -# Waits for lagging replicas to catch up. -# save: Forces a DB saving operation even if no save points are configured. -# nosave: Prevents DB saving operation even if one or more save points are configured. -# now: Skips waiting for lagging replicas. -# force: Ignores any errors that would normally prevent the server from exiting. -# -# Any combination of values is allowed as long as "save" and "nosave" are not set simultaneously. -# Example: "nosave force now" -# -# shutdown-on-sigint default -# shutdown-on-sigterm default - -################ NON-DETERMINISTIC LONG BLOCKING COMMANDS ##################### - -# Maximum time in milliseconds for EVAL scripts, functions and in some cases -# modules' commands before Redis can start processing or rejecting other clients. -# -# If the maximum execution time is reached Redis will start to reply to most -# commands with a BUSY error. -# -# In this state Redis will only allow a handful of commands to be executed. -# For instance, SCRIPT KILL, FUNCTION KILL, SHUTDOWN NOSAVE and possibly some -# module specific 'allow-busy' commands. -# -# SCRIPT KILL and FUNCTION KILL will only be able to stop a script that did not -# yet call any write commands, so SHUTDOWN NOSAVE may be the only way to stop -# the server in the case a write command was already issued by the script when -# the user doesn't want to wait for the natural termination of the script. -# -# The default is 5 seconds. It is possible to set it to 0 or a negative value -# to disable this mechanism (uninterrupted execution). Note that in the past -# this config had a different name, which is now an alias, so both of these do -# the same: -# lua-time-limit 5000 -# busy-reply-threshold 5000 - -################################ REDIS CLUSTER ############################### - -# Normal Redis instances can't be part of a Redis Cluster; only nodes that are -# started as cluster nodes can. In order to start a Redis instance as a -# cluster node enable the cluster support uncommenting the following: -# -# cluster-enabled yes - -# Every cluster node has a cluster configuration file. This file is not -# intended to be edited by hand. It is created and updated by Redis nodes. -# Every Redis Cluster node requires a different cluster configuration file. -# Make sure that instances running in the same system do not have -# overlapping cluster configuration file names. -# -# cluster-config-file nodes-6379.conf - -# Cluster node timeout is the amount of milliseconds a node must be unreachable -# for it to be considered in failure state. 
-# Most other internal time limits are a multiple of the node timeout. -# -# cluster-node-timeout 15000 - -# The cluster port is the port that the cluster bus will listen for inbound connections on. When set -# to the default value, 0, it will be bound to the command port + 10000. Setting this value requires -# you to specify the cluster bus port when executing cluster meet. -# cluster-port 0 - -# A replica of a failing master will avoid to start a failover if its data -# looks too old. -# -# There is no simple way for a replica to actually have an exact measure of -# its "data age", so the following two checks are performed: -# -# 1) If there are multiple replicas able to failover, they exchange messages -# in order to try to give an advantage to the replica with the best -# replication offset (more data from the master processed). -# Replicas will try to get their rank by offset, and apply to the start -# of the failover a delay proportional to their rank. -# -# 2) Every single replica computes the time of the last interaction with -# its master. This can be the last ping or command received (if the master -# is still in the "connected" state), or the time that elapsed since the -# disconnection with the master (if the replication link is currently down). -# If the last interaction is too old, the replica will not try to failover -# at all. -# -# The point "2" can be tuned by user. Specifically a replica will not perform -# the failover if, since the last interaction with the master, the time -# elapsed is greater than: -# -# (node-timeout * cluster-replica-validity-factor) + repl-ping-replica-period -# -# So for example if node-timeout is 30 seconds, and the cluster-replica-validity-factor -# is 10, and assuming a default repl-ping-replica-period of 10 seconds, the -# replica will not try to failover if it was not able to talk with the master -# for longer than 310 seconds. -# -# A large cluster-replica-validity-factor may allow replicas with too old data to failover -# a master, while a too small value may prevent the cluster from being able to -# elect a replica at all. -# -# For maximum availability, it is possible to set the cluster-replica-validity-factor -# to a value of 0, which means, that replicas will always try to failover the -# master regardless of the last time they interacted with the master. -# (However they'll always try to apply a delay proportional to their -# offset rank). -# -# Zero is the only value able to guarantee that when all the partitions heal -# the cluster will always be able to continue. -# -# cluster-replica-validity-factor 10 - -# Cluster replicas are able to migrate to orphaned masters, that are masters -# that are left without working replicas. This improves the cluster ability -# to resist to failures as otherwise an orphaned master can't be failed over -# in case of failure if it has no working replicas. -# -# Replicas migrate to orphaned masters only if there are still at least a -# given number of other working replicas for their old master. This number -# is the "migration barrier". A migration barrier of 1 means that a replica -# will migrate only if there is at least 1 other working replica for its master -# and so forth. It usually reflects the number of replicas you want for every -# master in your cluster. -# -# Default is 1 (replicas migrate only if their masters remain with at least -# one replica). To disable migration just set it to a very large value or -# set cluster-allow-replica-migration to 'no'. 
-# A value of 0 can be set but is useful only for debugging and dangerous -# in production. -# -# cluster-migration-barrier 1 - -# Turning off this option allows to use less automatic cluster configuration. -# It both disables migration to orphaned masters and migration from masters -# that became empty. -# -# Default is 'yes' (allow automatic migrations). -# -# cluster-allow-replica-migration yes - -# By default Redis Cluster nodes stop accepting queries if they detect there -# is at least a hash slot uncovered (no available node is serving it). -# This way if the cluster is partially down (for example a range of hash slots -# are no longer covered) all the cluster becomes, eventually, unavailable. -# It automatically returns available as soon as all the slots are covered again. -# -# However sometimes you want the subset of the cluster which is working, -# to continue to accept queries for the part of the key space that is still -# covered. In order to do so, just set the cluster-require-full-coverage -# option to no. -# -# cluster-require-full-coverage yes - -# This option, when set to yes, prevents replicas from trying to failover its -# master during master failures. However the replica can still perform a -# manual failover, if forced to do so. -# -# This is useful in different scenarios, especially in the case of multiple -# data center operations, where we want one side to never be promoted if not -# in the case of a total DC failure. -# -# cluster-replica-no-failover no - -# This option, when set to yes, allows nodes to serve read traffic while the -# cluster is in a down state, as long as it believes it owns the slots. -# -# This is useful for two cases. The first case is for when an application -# doesn't require consistency of data during node failures or network partitions. -# One example of this is a cache, where as long as the node has the data it -# should be able to serve it. -# -# The second use case is for configurations that don't meet the recommended -# three shards but want to enable cluster mode and scale later. A -# master outage in a 1 or 2 shard configuration causes a read/write outage to the -# entire cluster without this option set, with it set there is only a write outage. -# Without a quorum of masters, slot ownership will not change automatically. -# -# cluster-allow-reads-when-down no - -# This option, when set to yes, allows nodes to serve pubsub shard traffic while -# the cluster is in a down state, as long as it believes it owns the slots. -# -# This is useful if the application would like to use the pubsub feature even when -# the cluster global stable state is not OK. If the application wants to make sure only -# one shard is serving a given channel, this feature should be kept as yes. -# -# cluster-allow-pubsubshard-when-down yes - -# Cluster link send buffer limit is the limit on the memory usage of an individual -# cluster bus link's send buffer in bytes. Cluster links would be freed if they exceed -# this limit. This is to primarily prevent send buffers from growing unbounded on links -# toward slow peers (E.g. PubSub messages being piled up). -# This limit is disabled by default. Enable this limit when 'mem_cluster_links' INFO field -# and/or 'send-buffer-allocated' entries in the 'CLUSTER LINKS` command output continuously increase. -# Minimum limit of 1gb is recommended so that cluster link buffer can fit in at least a single -# PubSub message by default. 
(client-query-buffer-limit default value is 1gb) -# -# cluster-link-sendbuf-limit 0 - -# Clusters can configure their announced hostname using this config. This is a common use case for -# applications that need to use TLS Server Name Indication (SNI) or dealing with DNS based -# routing. By default this value is only shown as additional metadata in the CLUSTER SLOTS -# command, but can be changed using 'cluster-preferred-endpoint-type' config. This value is -# communicated along the clusterbus to all nodes, setting it to an empty string will remove -# the hostname and also propagate the removal. -# -# cluster-announce-hostname "" - -# Clusters can advertise how clients should connect to them using either their IP address, -# a user defined hostname, or by declaring they have no endpoint. Which endpoint is -# shown as the preferred endpoint is set by using the cluster-preferred-endpoint-type -# config with values 'ip', 'hostname', or 'unknown-endpoint'. This value controls how -# the endpoint returned for MOVED/ASKING requests as well as the first field of CLUSTER SLOTS. -# If the preferred endpoint type is set to hostname, but no announced hostname is set, a '?' -# will be returned instead. -# -# When a cluster advertises itself as having an unknown endpoint, it's indicating that -# the server doesn't know how clients can reach the cluster. This can happen in certain -# networking situations where there are multiple possible routes to the node, and the -# server doesn't know which one the client took. In this case, the server is expecting -# the client to reach out on the same endpoint it used for making the last request, but use -# the port provided in the response. -# -# cluster-preferred-endpoint-type ip - -# In order to setup your cluster make sure to read the documentation -# available at https://redis.io web site. - -########################## CLUSTER DOCKER/NAT support ######################## - -# In certain deployments, Redis Cluster nodes address discovery fails, because -# addresses are NAT-ted or because ports are forwarded (the typical case is -# Docker and other containers). -# -# In order to make Redis Cluster working in such environments, a static -# configuration where each node knows its public address is needed. The -# following four options are used for this scope, and are: -# -# * cluster-announce-ip -# * cluster-announce-port -# * cluster-announce-tls-port -# * cluster-announce-bus-port -# -# Each instructs the node about its address, client ports (for connections -# without and with TLS) and cluster message bus port. The information is then -# published in the header of the bus packets so that other nodes will be able to -# correctly map the address of the node publishing the information. -# -# If cluster-tls is set to yes and cluster-announce-tls-port is omitted or set -# to zero, then cluster-announce-port refers to the TLS port. Note also that -# cluster-announce-tls-port has no effect if cluster-tls is set to no. -# -# If the above options are not used, the normal Redis Cluster auto-detection -# will be used instead. -# -# Note that when remapped, the bus port may not be at the fixed offset of -# clients port + 10000, so you can specify any port and bus-port depending -# on how they get remapped. If the bus-port is not set, a fixed offset of -# 10000 will be used as usual. 
-# -# Example: -# -# cluster-announce-ip 10.1.1.5 -# cluster-announce-tls-port 6379 -# cluster-announce-port 0 -# cluster-announce-bus-port 6380 - -################################## SLOW LOG ################################### - -# The Redis Slow Log is a system to log queries that exceeded a specified -# execution time. The execution time does not include the I/O operations -# like talking with the client, sending the reply and so forth, -# but just the time needed to actually execute the command (this is the only -# stage of command execution where the thread is blocked and can not serve -# other requests in the meantime). -# -# You can configure the slow log with two parameters: one tells Redis -# what is the execution time, in microseconds, to exceed in order for the -# command to get logged, and the other parameter is the length of the -# slow log. When a new command is logged the oldest one is removed from the -# queue of logged commands. - -# The following time is expressed in microseconds, so 1000000 is equivalent -# to one second. Note that a negative number disables the slow log, while -# a value of zero forces the logging of every command. -slowlog-log-slower-than 10000 - -# There is no limit to this length. Just be aware that it will consume memory. -# You can reclaim memory used by the slow log with SLOWLOG RESET. -slowlog-max-len 128 - -################################ LATENCY MONITOR ############################## - -# The Redis latency monitoring subsystem samples different operations -# at runtime in order to collect data related to possible sources of -# latency of a Redis instance. -# -# Via the LATENCY command this information is available to the user that can -# print graphs and obtain reports. -# -# The system only logs operations that were performed in a time equal or -# greater than the amount of milliseconds specified via the -# latency-monitor-threshold configuration directive. When its value is set -# to zero, the latency monitor is turned off. -# -# By default latency monitoring is disabled since it is mostly not needed -# if you don't have latency issues, and collecting data has a performance -# impact, that while very small, can be measured under big load. Latency -# monitoring can easily be enabled at runtime using the command -# "CONFIG SET latency-monitor-threshold " if needed. -latency-monitor-threshold 0 - -################################ LATENCY TRACKING ############################## - -# The Redis extended latency monitoring tracks the per command latencies and enables -# exporting the percentile distribution via the INFO latencystats command, -# and cumulative latency distributions (histograms) via the LATENCY command. -# -# By default, the extended latency monitoring is enabled since the overhead -# of keeping track of the command latency is very small. -# latency-tracking yes - -# By default the exported latency percentiles via the INFO latencystats command -# are the p50, p99, and p999. -# latency-tracking-info-percentiles 50 99 99.9 - -############################# EVENT NOTIFICATION ############################## - -# Redis can notify Pub/Sub clients about events happening in the key space. 
-# This feature is documented at https://redis.io/topics/notifications -# -# For instance if keyspace events notification is enabled, and a client -# performs a DEL operation on key "foo" stored in the Database 0, two -# messages will be published via Pub/Sub: -# -# PUBLISH __keyspace@0__:foo del -# PUBLISH __keyevent@0__:del foo -# -# It is possible to select the events that Redis will notify among a set -# of classes. Every class is identified by a single character: -# -# K Keyspace events, published with __keyspace@__ prefix. -# E Keyevent events, published with __keyevent@__ prefix. -# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... -# $ String commands -# l List commands -# s Set commands -# h Hash commands -# z Sorted set commands -# x Expired events (events generated every time a key expires) -# e Evicted events (events generated when a key is evicted for maxmemory) -# n New key events (Note: not included in the 'A' class) -# t Stream commands -# d Module key type events -# m Key-miss events (Note: It is not included in the 'A' class) -# A Alias for g$lshzxetd, so that the "AKE" string means all the events -# (Except key-miss events which are excluded from 'A' due to their -# unique nature). -# -# The "notify-keyspace-events" takes as argument a string that is composed -# of zero or multiple characters. The empty string means that notifications -# are disabled. -# -# Example: to enable list and generic events, from the point of view of the -# event name, use: -# -# notify-keyspace-events Elg -# -# Example 2: to get the stream of the expired keys subscribing to channel -# name __keyevent@0__:expired use: -# -# notify-keyspace-events Ex -# -# By default all notifications are disabled because most users don't need -# this feature and the feature has some overhead. Note that if you don't -# specify at least one of K or E, no events will be delivered. -notify-keyspace-events "" - -############################### ADVANCED CONFIG ############################### - -# Hashes are encoded using a memory efficient data structure when they have a -# small number of entries, and the biggest entry does not exceed a given -# threshold. These thresholds can be configured using the following directives. -hash-max-listpack-entries 512 -hash-max-listpack-value 64 - -# Lists are also encoded in a special way to save a lot of space. -# The number of entries allowed per internal list node can be specified -# as a fixed maximum size or a maximum number of elements. -# For a fixed maximum size, use -5 through -1, meaning: -# -5: max size: 64 Kb <-- not recommended for normal workloads -# -4: max size: 32 Kb <-- not recommended -# -3: max size: 16 Kb <-- probably not recommended -# -2: max size: 8 Kb <-- good -# -1: max size: 4 Kb <-- good -# Positive numbers mean store up to _exactly_ that number of elements -# per list node. -# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size), -# but if your use case is unique, adjust the settings as necessary. -list-max-listpack-size -2 - -# Lists may also be compressed. -# Compress depth is the number of quicklist ziplist nodes from *each* side of -# the list to *exclude* from compression. The head and tail of the list -# are always uncompressed for fast push/pop operations. 
Settings are: -# 0: disable all list compression -# 1: depth 1 means "don't start compressing until after 1 node into the list, -# going from either the head or tail" -# So: [head]->node->node->...->node->[tail] -# [head], [tail] will always be uncompressed; inner nodes will compress. -# 2: [head]->[next]->node->node->...->node->[prev]->[tail] -# 2 here means: don't compress head or head->next or tail->prev or tail, -# but compress all nodes between them. -# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] -# etc. -list-compress-depth 0 - -# Sets have a special encoding in just one case: when a set is composed -# of just strings that happen to be integers in radix 10 in the range -# of 64 bit signed integers. -# The following configuration setting sets the limit in the size of the -# set in order to use this special memory saving encoding. -set-max-intset-entries 512 - -# Similarly to hashes and lists, sorted sets are also specially encoded in -# order to save a lot of space. This encoding is only used when the length and -# elements of a sorted set are below the following limits: -zset-max-listpack-entries 128 -zset-max-listpack-value 64 - -# HyperLogLog sparse representation bytes limit. The limit includes the -# 16 bytes header. When an HyperLogLog using the sparse representation crosses -# this limit, it is converted into the dense representation. -# -# A value greater than 16000 is totally useless, since at that point the -# dense representation is more memory efficient. -# -# The suggested value is ~ 3000 in order to have the benefits of -# the space efficient encoding without slowing down too much PFADD, -# which is O(N) with the sparse encoding. The value can be raised to -# ~ 10000 when CPU is not a concern, but space is, and the data set is -# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. -hll-sparse-max-bytes 3000 - -# Streams macro node max size / items. The stream data structure is a radix -# tree of big nodes that encode multiple items inside. Using this configuration -# it is possible to configure how big a single node can be in bytes, and the -# maximum number of items it may contain before switching to a new node when -# appending new stream entries. If any of the following settings are set to -# zero, the limit is ignored, so for instance it is possible to set just a -# max entries limit by setting max-bytes to 0 and max-entries to the desired -# value. -stream-node-max-bytes 4096 -stream-node-max-entries 100 - -# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in -# order to help rehashing the main Redis hash table (the one mapping top-level -# keys to values). The hash table implementation Redis uses (see dict.c) -# performs a lazy rehashing: the more operation you run into a hash table -# that is rehashing, the more rehashing "steps" are performed, so if the -# server is idle the rehashing is never complete and some more memory is used -# by the hash table. -# -# The default is to use this millisecond 10 times every second in order to -# actively rehash the main dictionaries, freeing memory when possible. -# -# If unsure: -# use "activerehashing no" if you have hard latency requirements and it is -# not a good thing in your environment that Redis can reply from time to time -# to queries with 2 milliseconds delay. -# -# use "activerehashing yes" if you don't have such hard requirements but -# want to free memory asap when possible. 
-activerehashing yes
-
-# The client output buffer limits can be used to force disconnection of clients
-# that are not reading data from the server fast enough for some reason (a
-# common reason is that a Pub/Sub client can't consume messages as fast as the
-# publisher can produce them).
-#
-# The limit can be set differently for the three different classes of clients:
-#
-# normal -> normal clients including MONITOR clients
-# replica -> replica clients
-# pubsub -> clients subscribed to at least one pubsub channel or pattern
-#
-# The syntax of every client-output-buffer-limit directive is the following:
-#
-# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
-#
-# A client is immediately disconnected once the hard limit is reached, or if
-# the soft limit is reached and remains reached for the specified number of
-# seconds (continuously).
-# So for instance if the hard limit is 32 megabytes and the soft limit is
-# 16 megabytes / 10 seconds, the client will get disconnected immediately
-# if the size of the output buffers reaches 32 megabytes, but will also get
-# disconnected if the client reaches 16 megabytes and continuously exceeds
-# the limit for 10 seconds.
-#
-# By default normal clients are not limited because they don't receive data
-# without asking (in a push way), but just after a request, so only
-# asynchronous clients may create a scenario where data is requested faster
-# than it can be read.
-#
-# Instead there is a default limit for pubsub and replica clients, since
-# subscribers and replicas receive data in a push fashion.
-#
-# Note that it doesn't make sense to set the replica clients' output buffer
-# limit lower than the repl-backlog-size config (partial sync would succeed
-# and then the replica would get disconnected).
-# Such a configuration is ignored (the size of repl-backlog-size will be used).
-# This doesn't have memory consumption implications since the replica client
-# will share the backlog buffers memory.
-#
-# Both the hard and the soft limit can be disabled by setting them to zero.
-client-output-buffer-limit normal 0 0 0
-client-output-buffer-limit replica 256mb 64mb 60
-client-output-buffer-limit pubsub 32mb 8mb 60
-
-# Client query buffers accumulate new commands. They are limited to a fixed
-# amount by default in order to avoid that a protocol desynchronization (for
-# instance due to a bug in the client) leads to unbounded memory usage in
-# the query buffer. However you can configure it here if you have very special
-# needs, such as huge multi/exec requests or the like.
-#
-# client-query-buffer-limit 1gb
-
-# In some scenarios client connections can hog up memory leading to OOM
-# errors or data eviction. To avoid this we can cap the accumulated memory
-# used by all client connections (all pubsub and normal clients). Once we
-# reach that limit connections will be dropped by the server, freeing up
-# memory. The server will attempt to drop the connections using the most
-# memory first. We call this mechanism "client eviction".
-#
-# Client eviction is configured using the maxmemory-clients setting as follows:
-# 0 - client eviction is disabled (default)
-#
-# A memory value can be used for the client eviction threshold,
-# for example:
-# maxmemory-clients 1g
-#
-# A percentage value (between 1% and 100%) means the client eviction threshold
-# is based on a percentage of the maxmemory setting.
For example to set client -# eviction at 5% of maxmemory: -# maxmemory-clients 5% - -# In the Redis protocol, bulk requests, that are, elements representing single -# strings, are normally limited to 512 mb. However you can change this limit -# here, but must be 1mb or greater -# -# proto-max-bulk-len 512mb - -# Redis calls an internal function to perform many background tasks, like -# closing connections of clients in timeout, purging expired keys that are -# never requested, and so forth. -# -# Not all tasks are performed with the same frequency, but Redis checks for -# tasks to perform according to the specified "hz" value. -# -# By default "hz" is set to 10. Raising the value will use more CPU when -# Redis is idle, but at the same time will make Redis more responsive when -# there are many keys expiring at the same time, and timeouts may be -# handled with more precision. -# -# The range is between 1 and 500, however a value over 100 is usually not -# a good idea. Most users should use the default of 10 and raise this up to -# 100 only in environments where very low latency is required. -hz 10 - -# Normally it is useful to have an HZ value which is proportional to the -# number of clients connected. This is useful in order, for instance, to -# avoid too many clients are processed for each background task invocation -# in order to avoid latency spikes. -# -# Since the default HZ value by default is conservatively set to 10, Redis -# offers, and enables by default, the ability to use an adaptive HZ value -# which will temporarily raise when there are many connected clients. -# -# When dynamic HZ is enabled, the actual configured HZ will be used -# as a baseline, but multiples of the configured HZ value will be actually -# used as needed once more clients are connected. In this way an idle -# instance will use very little CPU time while a busy instance will be -# more responsive. -dynamic-hz yes - -# When a child rewrites the AOF file, if the following option is enabled -# the file will be fsync-ed every 4 MB of data generated. This is useful -# in order to commit the file to the disk more incrementally and avoid -# big latency spikes. -aof-rewrite-incremental-fsync yes - -# When redis saves RDB file, if the following option is enabled -# the file will be fsync-ed every 4 MB of data generated. This is useful -# in order to commit the file to the disk more incrementally and avoid -# big latency spikes. -rdb-save-incremental-fsync yes - -# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good -# idea to start with the default settings and only change them after investigating -# how to improve the performances and how the keys LFU change over time, which -# is possible to inspect via the OBJECT FREQ command. -# -# There are two tunable parameters in the Redis LFU implementation: the -# counter logarithm factor and the counter decay time. It is important to -# understand what the two parameters mean before changing them. -# -# The LFU counter is just 8 bits per key, it's maximum value is 255, so Redis -# uses a probabilistic increment with logarithmic behavior. Given the value -# of the old counter, when a key is accessed, the counter is incremented in -# this way: -# -# 1. A random number R between 0 and 1 is extracted. -# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1). -# 3. The counter is incremented only if R < P. -# -# The default lfu-log-factor is 10. 
-# The default lfu-log-factor is 10. This is a table of how the frequency
-# counter changes with a different number of accesses with different
-# logarithmic factors:
-#
-# +--------+------------+------------+------------+------------+------------+
-# | factor | 100 hits   | 1000 hits  | 100K hits  | 1M hits    | 10M hits   |
-# +--------+------------+------------+------------+------------+------------+
-# | 0      | 104        | 255        | 255        | 255        | 255        |
-# +--------+------------+------------+------------+------------+------------+
-# | 1      | 18         | 49         | 255        | 255        | 255        |
-# +--------+------------+------------+------------+------------+------------+
-# | 10     | 10         | 18         | 142        | 255        | 255        |
-# +--------+------------+------------+------------+------------+------------+
-# | 100    | 8          | 11         | 49         | 143        | 255        |
-# +--------+------------+------------+------------+------------+------------+
-#
-# NOTE: The above table was obtained by running the following commands:
-#
-# redis-benchmark -n 1000000 incr foo
-# redis-cli object freq foo
-#
-# NOTE 2: The counter initial value is 5 in order to give new objects a chance
-# to accumulate hits.
-#
-# The counter decay time is the time, in minutes, that must elapse in order
-# for the key counter to be divided by two (or decremented, if it has a value
-# <= 10).
-#
-# The default value for the lfu-decay-time is 1. A special value of 0 means to
-# decay the counter every time it happens to be scanned.
-#
-# lfu-log-factor 10
-# lfu-decay-time 1
-
-########################### ACTIVE DEFRAGMENTATION #######################
-#
-# What is active defragmentation?
-# -------------------------------
-#
-# Active (online) defragmentation allows a Redis server to compact the
-# spaces left between small allocations and deallocations of data in memory,
-# thus allowing memory to be reclaimed.
-#
-# Fragmentation is a natural process that happens with every allocator (but
-# less so with Jemalloc, fortunately) and certain workloads. Normally a server
-# restart is needed in order to lower the fragmentation, or at least to flush
-# away all the data and create it again. However, thanks to this feature
-# implemented by Oran Agra for Redis 4.0, this process can happen at runtime,
-# in a "hot" way, while the server is running.
-#
-# Basically, when the fragmentation is over a certain level (see the
-# configuration options below) Redis will start to create new copies of the
-# values in contiguous memory regions by exploiting certain specific Jemalloc
-# features (in order to understand if an allocation is causing fragmentation
-# and to allocate it in a better place), and at the same time, will release the
-# old copies of the data. This process, repeated incrementally for all the keys,
-# will cause the fragmentation to drop back to normal values.
-#
-# Important things to understand:
-#
-# 1. This feature is disabled by default, and only works if you compiled Redis
-# to use the copy of Jemalloc we ship with the source code of Redis.
-# This is the default with Linux builds.
-#
-# 2. You never need to enable this feature if you don't have fragmentation
-# issues.
-#
-# 3. Once you experience fragmentation, you can enable this feature when
-# needed with the command "CONFIG SET activedefrag yes".
-#
-# The configuration parameters are able to fine-tune the behavior of the
-# defragmentation process. If you are not sure about what they mean, it is
-# a good idea to leave the defaults untouched.
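The notes above recommend leaving active defrag off until fragmentation actually appears and then turning it on at runtime with "CONFIG SET activedefrag yes". As a rough sketch of what that could look like from Go, assuming the third-party github.com/redis/go-redis/v9 client (not something this repository necessarily uses) and an arbitrary 1.5 threshold on mem_fragmentation_ratio:

```go
package main

import (
	"context"
	"fmt"
	"strconv"
	"strings"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Pull mem_fragmentation_ratio out of the "memory" section of INFO.
	info, err := rdb.Info(ctx, "memory").Result()
	if err != nil {
		panic(err)
	}
	var ratio float64
	for _, line := range strings.Split(info, "\r\n") {
		if strings.HasPrefix(line, "mem_fragmentation_ratio:") {
			ratio, _ = strconv.ParseFloat(strings.TrimPrefix(line, "mem_fragmentation_ratio:"), 64)
		}
	}

	// Only enable active defrag once fragmentation is actually a problem;
	// the 1.5 threshold here is an arbitrary illustration, not a recommendation.
	if ratio > 1.5 {
		if err := rdb.ConfigSet(ctx, "activedefrag", "yes").Err(); err != nil {
			// Fails on builds that do not use the bundled Jemalloc (see point 1 above).
			fmt.Println("could not enable active defrag:", err)
		}
	} else {
		fmt.Printf("fragmentation ratio %.2f, leaving activedefrag alone\n", ratio)
	}
}
```

The error from CONFIG SET is reported rather than ignored because, as point 1 above notes, the feature only exists on builds compiled against the bundled Jemalloc.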
-
-# Active defragmentation is disabled by default
-# activedefrag no
-
-# Minimum amount of fragmentation waste to start active defrag
-# active-defrag-ignore-bytes 100mb
-
-# Minimum percentage of fragmentation to start active defrag
-# active-defrag-threshold-lower 10
-
-# Maximum percentage of fragmentation at which we use maximum effort
-# active-defrag-threshold-upper 100
-
-# Minimal effort for defrag in CPU percentage, to be used when the lower
-# threshold is reached
-# active-defrag-cycle-min 1
-
-# Maximal effort for defrag in CPU percentage, to be used when the upper
-# threshold is reached
-# active-defrag-cycle-max 25
-
-# Maximum number of set/hash/zset/list fields that will be processed from
-# the main dictionary scan
-# active-defrag-max-scan-fields 1000
-
-# Jemalloc background thread for purging will be enabled by default
-jemalloc-bg-thread yes
-
-# It is possible to pin different threads and processes of Redis to specific
-# CPUs in your system, in order to maximize the performance of the server.
-# This is useful both to pin different Redis threads to different CPUs and
-# to make sure that multiple Redis instances running on the same host are
-# pinned to different CPUs.
-#
-# Normally you can do this using the "taskset" command, however it is also
-# possible to do this via Redis configuration directly, both in Linux and FreeBSD.
-#
-# You can pin the server/IO threads, bio threads, aof rewrite child process, and
-# the bgsave child process. The syntax to specify the cpu list is the same as
-# the taskset command:
-#
-# Set redis server/io threads to cpu affinity 0,2,4,6:
-# server_cpulist 0-7:2
-#
-# Set bio threads to cpu affinity 1,3:
-# bio_cpulist 1,3
-#
-# Set aof rewrite child process to cpu affinity 8,9,10,11:
-# aof_rewrite_cpulist 8-11
-#
-# Set bgsave child process to cpu affinity 1,10,11:
-# bgsave_cpulist 1,10-11
-
-# In some cases redis will emit warnings and even refuse to start if it detects
-# that the system is in a bad state. It is possible to suppress these warnings
-# by setting the following config, which takes a space-delimited list of warnings
-# to suppress:
-#
-# ignore-warnings ARM64-COW-BUG
diff --git a/warm/shell b/warm/shell
deleted file mode 100755
index fee40786a..000000000
--- a/warm/shell
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/env bash
-
-echo '--network=host node:14.17.5 /bin/bash'
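Returning to the cpulist examples above (server_cpulist 0-7:2 and friends): the taskset-style syntax packs single CPUs, ranges, and ranges with a stride into one comma-separated string. The hypothetical parser below expands such a string into the CPU numbers it names, purely to illustrate the notation; Redis performs its own parsing internally.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseCPUList expands a taskset-style list such as "0-7:2" or "1,10-11"
// into the individual CPU numbers it names.
func parseCPUList(spec string) ([]int, error) {
	var cpus []int
	for _, part := range strings.Split(spec, ",") {
		stride := 1
		if i := strings.Index(part, ":"); i >= 0 {
			s, err := strconv.Atoi(part[i+1:])
			if err != nil || s < 1 {
				return nil, fmt.Errorf("bad stride in %q", part)
			}
			stride, part = s, part[:i]
		}
		lo, hi := part, part
		if i := strings.Index(part, "-"); i >= 0 {
			lo, hi = part[:i], part[i+1:]
		}
		start, err := strconv.Atoi(lo)
		if err != nil {
			return nil, fmt.Errorf("bad cpu in %q", part)
		}
		end, err := strconv.Atoi(hi)
		if err != nil || end < start {
			return nil, fmt.Errorf("bad range in %q", part)
		}
		for c := start; c <= end; c += stride {
			cpus = append(cpus, c)
		}
	}
	return cpus, nil
}

func main() {
	for _, spec := range []string{"0-7:2", "1,3", "8-11", "1,10-11"} {
		cpus, err := parseCPUList(spec)
		fmt.Println(spec, "->", cpus, err)
	}
}
```

Feeding it the examples from the comments yields 0,2,4,6 for "0-7:2", 1,3 for "1,3", and so on, matching the affinities described in the removed config.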