diff --git a/.asf.yaml b/.asf.yaml new file mode 100644 index 0000000000000..ad1e99e2a4d56 --- /dev/null +++ b/.asf.yaml @@ -0,0 +1,48 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +github: + description: The Cloud-Native API Gateway + homepage: https://apisix.apache.org/ + labels: + - api-gateway + - cloud-native + - nginx + - lua + - luajit + - apigateway + - microservices + - api + - loadbalancing + - reverse-proxy + - api-management + - apisix + - serverless + - iot + - devops + - kubernetes + - docker + + enabled_merge_buttons: + squash: true + merge: false + rebase: false + + notifications: + commits: notifications@apisix.apache.org + issues: notifications@apisix.apache.org + pullrequests: notifications@apisix.apache.org diff --git a/.travis.yml b/.travis.yml index d33d27cc2bce7..ddc6b898c1b0a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,4 +1,4 @@ -dist: xenial +dist: bionic sudo: required matrix: diff --git a/.travis/apisix_cli_test.sh b/.travis/apisix_cli_test.sh index d67c7f837b7d2..ca31995c73f25 100755 --- a/.travis/apisix_cli_test.sh +++ b/.travis/apisix_cli_test.sh @@ -23,6 +23,8 @@ set -ex +git checkout conf/config.yaml + # check whether the 'reuseport' is in nginx.conf . 
make init @@ -72,3 +74,78 @@ done sed -i '/dns_resolver:/,+4s/^#//' conf/config.yaml echo "passed: system nameserver imported" + +# enable enable_dev_mode +sed -i 's/enable_dev_mode: false/enable_dev_mode: true/g' conf/config.yaml + +make init + +count=`grep -c "worker_processes 1;" conf/nginx.conf` +if [ $count -ne 1 ]; then + echo "failed: worker_processes is not 1 when enable_dev_mode is enabled" + exit 1 +fi + +count=`grep -c "listen 9080.*reuseport" conf/nginx.conf || true` +if [ $count -ne 0 ]; then + echo "failed: reuseport should be disabled when enable_dev_mode is enabled" + exit 1 +fi + +git checkout conf/config.yaml + +# check whether the 'worker_cpu_affinity' is in nginx.conf . + +make init + +grep -E "worker_cpu_affinity" conf/nginx.conf > /dev/null +if [ ! $? -eq 0 ]; then + echo "failed: nginx.conf file is missing worker_cpu_affinity configuration" + exit 1 +fi + +echo "passed: nginx.conf file contains worker_cpu_affinity configuration" + +# check admin https enabled + +sed -i 's/\# port_admin: 9180/port_admin: 9180/' conf/config.yaml +sed -i 's/\# https_admin: true/https_admin: true/' conf/config.yaml + +make init + +grep "listen 9180 ssl" conf/nginx.conf > /dev/null +if [ ! $? -eq 0 ]; then + echo "failed: failed to enable https for admin" + exit 1 +fi + +make run + +code=$(curl -k -i -m 20 -o /dev/null -s -w %{http_code} https://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') +if [ ! $code -eq 200 ]; then + echo "failed: failed to enable https for admin" + exit 1 +fi + +echo "passed: admin https enabled" + +# rollback to the default + +make stop + +sed -i 's/port_admin: 9180/\# port_admin: 9180/' conf/config.yaml +sed -i 's/https_admin: true/\# https_admin: true/' conf/config.yaml + +make init + +set +ex + +grep "listen 9180 ssl" conf/nginx.conf > /dev/null +if [ ! $?
-eq 1 ]; then + echo "failed: failed to rollback to the default admin config" + exit 1 +fi + +set -ex + +echo "passed: rollback to the default admin config" diff --git a/.travis/linux_apisix_current_luarocks_runner.sh b/.travis/linux_apisix_current_luarocks_runner.sh index b67e115fa7f53..0264fc5ba826d 100755 --- a/.travis/linux_apisix_current_luarocks_runner.sh +++ b/.travis/linux_apisix_current_luarocks_runner.sh @@ -47,6 +47,11 @@ script() { export PATH=$OPENRESTY_PREFIX/nginx/sbin:$OPENRESTY_PREFIX/luajit/bin:$OPENRESTY_PREFIX/bin:$PATH openresty -V sudo service etcd start + sudo service etcd stop + mkdir -p ~/etcd-data + /usr/bin/etcd --listen-client-urls 'http://0.0.0.0:2379' --advertise-client-urls='http://0.0.0.0:2379' --data-dir ~/etcd-data > /dev/null 2>&1 & + etcd --version + sleep 5 sudo rm -rf /usr/local/apisix diff --git a/.travis/linux_apisix_master_luarocks_runner.sh b/.travis/linux_apisix_master_luarocks_runner.sh index 2c76087fa20b8..7705c97559eae 100755 --- a/.travis/linux_apisix_master_luarocks_runner.sh +++ b/.travis/linux_apisix_master_luarocks_runner.sh @@ -20,6 +20,7 @@ set -ex export_or_prefix() { export OPENRESTY_PREFIX="/usr/local/openresty-debug" + export APISIX_MAIN="https://raw.githubusercontent.com/apache/incubator-apisix/master/rockspec/apisix-master-0.rockspec" } do_install() { @@ -46,7 +47,11 @@ script() { export_or_prefix export PATH=$OPENRESTY_PREFIX/nginx/sbin:$OPENRESTY_PREFIX/luajit/bin:$OPENRESTY_PREFIX/bin:$PATH openresty -V - sudo service etcd start + sudo service etcd stop + mkdir -p ~/etcd-data + /usr/bin/etcd --listen-client-urls 'http://0.0.0.0:2379' --advertise-client-urls='http://0.0.0.0:2379' --data-dir ~/etcd-data > /dev/null 2>&1 & + etcd --version + sleep 5 sudo rm -rf /usr/local/apisix @@ -62,7 +67,7 @@ script() { sudo PATH=$PATH ./utils/install-apisix.sh remove > build.log 2>&1 || (cat build.log && exit 1) # install APISIX by luarocks - sudo luarocks install rockspec/apisix-master-0.rockspec > build.log 2>&1 || 
(cat build.log && exit 1) + sudo luarocks install $APISIX_MAIN > build.log 2>&1 || (cat build.log && exit 1) # show install files luarocks show apisix diff --git a/.travis/linux_openresty_runner.sh b/.travis/linux_openresty_runner.sh index 384d10ec4a824..86505cfce3c62 100755 --- a/.travis/linux_openresty_runner.sh +++ b/.travis/linux_openresty_runner.sh @@ -37,12 +37,17 @@ before_install() { sudo cpanm --notest Test::Nginx >build.log 2>&1 || (cat build.log && exit 1) docker pull redis:3.0-alpine docker run --rm -itd -p 6379:6379 --name apisix_redis redis:3.0-alpine + docker run --rm -itd -e HTTP_PORT=8888 -e HTTPS_PORT=9999 -p 8888:8888 -p 9999:9999 mendhak/http-https-echo + # Runs Keycloak version 10.0.2 with inbuilt policies for unit tests + docker run --rm -itd -e KEYCLOAK_USER=admin -e KEYCLOAK_PASSWORD=123456 -p 8090:8080 sshniro/keycloak-apisix # spin up kafka cluster for tests (1 zookeper and 1 kafka instance) docker pull bitnami/zookeeper:3.6.0 docker pull bitnami/kafka:latest docker network create kafka-net --driver bridge docker run --name zookeeper-server -d -p 2181:2181 --network kafka-net -e ALLOW_ANONYMOUS_LOGIN=yes bitnami/zookeeper:3.6.0 docker run --name kafka-server1 -d --network kafka-net -e ALLOW_PLAINTEXT_LISTENER=yes -e KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper-server:2181 -e KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://127.0.0.1:9092 -p 9092:9092 -e KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=true bitnami/kafka:latest + docker pull bitinit/eureka + docker run --name eureka -d -p 8761:8761 --env ENVIRONMENT=apisix --env spring.application.name=apisix-eureka --env server.port=8761 --env eureka.instance.ip-address=127.0.0.1 --env eureka.client.registerWithEureka=true --env eureka.client.fetchRegistry=false --env eureka.client.serviceUrl.defaultZone=http://127.0.0.1:8761/eureka/ bitinit/eureka sleep 5 docker exec -it kafka-server1 /opt/bitnami/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper-server:2181 --replication-factor 1 --partitions 1 --topic 
test2 } @@ -123,7 +128,11 @@ script() { export_or_prefix export PATH=$OPENRESTY_PREFIX/nginx/sbin:$OPENRESTY_PREFIX/luajit/bin:$OPENRESTY_PREFIX/bin:$PATH openresty -V - sudo service etcd start + sudo service etcd stop + mkdir -p ~/etcd-data + /usr/bin/etcd --listen-client-urls 'http://0.0.0.0:2379' --advertise-client-urls='http://0.0.0.0:2379' --data-dir ~/etcd-data > /dev/null 2>&1 & + etcd --version + sleep 5 ./build-cache/grpc_server_example & @@ -142,7 +151,7 @@ script() { sleep 1 make lint && make license-check || exit 1 - APISIX_ENABLE_LUACOV=1 prove -Itest-nginx/lib -r t + APISIX_ENABLE_LUACOV=1 PERL5LIB=.:$PERL5LIB prove -Itest-nginx/lib -r t } after_success() { diff --git a/.travis/linux_tengine_runner.sh b/.travis/linux_tengine_runner.sh index 45a9ec448e298..fb9b6fd657242 100755 --- a/.travis/linux_tengine_runner.sh +++ b/.travis/linux_tengine_runner.sh @@ -38,12 +38,17 @@ before_install() { sudo cpanm --notest Test::Nginx >build.log 2>&1 || (cat build.log && exit 1) docker pull redis:3.0-alpine docker run --rm -itd -p 6379:6379 --name apisix_redis redis:3.0-alpine + docker run --rm -itd -e HTTP_PORT=8888 -e HTTPS_PORT=9999 -p 8888:8888 -p 9999:9999 mendhak/http-https-echo + # Runs Keycloak version 10.0.2 with inbuilt policies for unit tests + docker run --rm -itd -e KEYCLOAK_USER=admin -e KEYCLOAK_PASSWORD=123456 -p 8090:8080 sshniro/keycloak-apisix # spin up kafka cluster for tests (1 zookeper and 1 kafka instance) docker pull bitnami/zookeeper:3.6.0 docker pull bitnami/kafka:latest docker network create kafka-net --driver bridge docker run --name zookeeper-server -d -p 2181:2181 --network kafka-net -e ALLOW_ANONYMOUS_LOGIN=yes bitnami/zookeeper:3.6.0 docker run --name kafka-server1 -d --network kafka-net -e ALLOW_PLAINTEXT_LISTENER=yes -e KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper-server:2181 -e KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://127.0.0.1:9092 -p 9092:9092 -e KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=true bitnami/kafka:latest + docker pull 
bitinit/eureka + docker run --name eureka -d -p 8761:8761 --env ENVIRONMENT=apisix --env spring.application.name=apisix-eureka --env server.port=8761 --env eureka.instance.ip-address=127.0.0.1 --env eureka.client.registerWithEureka=true --env eureka.client.fetchRegistry=false --env eureka.client.serviceUrl.defaultZone=http://127.0.0.1:8761/eureka/ bitinit/eureka sleep 5 docker exec -it kafka-server1 /opt/bitnami/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper-server:2181 --replication-factor 1 --partitions 1 --topic test2 } @@ -266,7 +271,11 @@ script() { export_or_prefix export PATH=$OPENRESTY_PREFIX/nginx/sbin:$OPENRESTY_PREFIX/luajit/bin:$OPENRESTY_PREFIX/bin:$PATH openresty -V - sudo service etcd start + sudo service etcd stop + mkdir -p ~/etcd-data + /usr/bin/etcd --listen-client-urls 'http://0.0.0.0:2379' --advertise-client-urls='http://0.0.0.0:2379' --data-dir ~/etcd-data > /dev/null 2>&1 & + etcd --version + sleep 5 ./build-cache/grpc_server_example & @@ -279,7 +288,7 @@ script() { ./bin/apisix stop sleep 1 make lint && make license-check || exit 1 - APISIX_ENABLE_LUACOV=1 prove -Itest-nginx/lib -r t + APISIX_ENABLE_LUACOV=1 PERL5LIB=.:$PERL5LIB prove -Itest-nginx/lib -r t } after_success() { diff --git a/.travis/osx_openresty_runner.sh b/.travis/osx_openresty_runner.sh index 1cfce27285859..0f60eb987b406 100755 --- a/.travis/osx_openresty_runner.sh +++ b/.travis/osx_openresty_runner.sh @@ -43,7 +43,7 @@ do_install() { git clone https://github.com/iresty/test-nginx.git test-nginx wget -P utils https://raw.githubusercontent.com/openresty/openresty-devel-utils/master/lj-releng - chmod a+x utils/lj-releng + chmod a+x utils/lj-releng wget https://github.com/iresty/grpc_server_example/releases/download/20200314/grpc_server_example-darwin-amd64.tar.gz tar -xvf grpc_server_example-darwin-amd64.tar.gz diff --git a/CHANGELOG.md b/CHANGELOG.md index 5e5494df6f292..8770248b0d354 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,8 @@ # Table of Contents 
+- [1.4.0](#140) +- [1.3.0](#130) - [1.2.0](#120) - [1.1.0](#110) - [1.0.0](#100) @@ -27,6 +29,37 @@ - [0.7.0](#070) - [0.6.0](#060) +## 1.4.0 + +### Core +- Admin API: Support unique names for routes [1655](https://github.com/apache/incubator-apisix/pull/1655) +- Optimization of log buffer size and flush time [1570](https://github.com/apache/incubator-apisix/pull/1570) + +### New plugins +- :sunrise: **Apache Skywalking plugin** [1241](https://github.com/apache/incubator-apisix/pull/1241) +- :sunrise: **Keycloak Identity Server Plugin** [1701](https://github.com/apache/incubator-apisix/pull/1701) +- :sunrise: **Echo Plugin** [1632](https://github.com/apache/incubator-apisix/pull/1632) +- :sunrise: **Consume Restriction Plugin** [1437](https://github.com/apache/incubator-apisix/pull/1437) + +### Improvements +- Batch Request : Copy all headers to every request [1697](https://github.com/apache/incubator-apisix/pull/1697) +- SSL private key encryption [1678](https://github.com/apache/incubator-apisix/pull/1678) +- Improvement of docs for multiple plugins + + +## 1.3.0 + +The 1.3 version is mainly for security update. + +### Security +- reject invalid header[#1462](https://github.com/apache/incubator-apisix/pull/1462) and uri safe encode[#1461](https://github.com/apache/incubator-apisix/pull/1461) +- only allow 127.0.0.1 access admin API and dashboard by default. [#1458](https://github.com/apache/incubator-apisix/pull/1458) + +### Plugin +- :sunrise: **add batch request plugin**. [#1388](https://github.com/apache/incubator-apisix/pull/1388) +- implemented plugin `sys logger`. [#1414](https://github.com/apache/incubator-apisix/pull/1414) + + ## 1.2.0 The 1.2 version brings many new features, including core and plugins. 
diff --git a/CHANGELOG_CN.md b/CHANGELOG_CN.md index 8e19e84721ea4..d64e260295b84 100644 --- a/CHANGELOG_CN.md +++ b/CHANGELOG_CN.md @@ -19,6 +19,8 @@ # Table of Contents +- [1.4.0](#140) +- [1.3.0](#130) - [1.2.0](#120) - [1.1.0](#110) - [1.0.0](#100) @@ -27,6 +29,36 @@ - [0.7.0](#070) - [0.6.0](#060) +## 1.4.0 + +### Core +- Admin API: 路由支持唯一 name 字段 [1655](https://github.com/apache/incubator-apisix/pull/1655) +- 优化 log 缓冲区大小和刷新时间 [1570](https://github.com/apache/incubator-apisix/pull/1570) + +### New plugins +- :sunrise: **Apache Skywalking plugin** [1241](https://github.com/apache/incubator-apisix/pull/1241) +- :sunrise: **Keycloak Identity Server Plugin** [1701](https://github.com/apache/incubator-apisix/pull/1701) +- :sunrise: **Echo Plugin** [1632](https://github.com/apache/incubator-apisix/pull/1632) +- :sunrise: **Consume Restriction Plugin** [1437](https://github.com/apache/incubator-apisix/pull/1437) + +### Improvements +- Batch Request : 对每个请求拷贝头 [1697](https://github.com/apache/incubator-apisix/pull/1697) +- SSL 私钥加密 [1678](https://github.com/apache/incubator-apisix/pull/1678) +- 众多插件文档改善 + +## 1.3.0 + +1.3 版本主要带来安全更新。 + +## Security +- 拒绝无效的 header [#1462](https://github.com/apache/incubator-apisix/pull/1462) 并对 uri 进行安全编码 [#1461](https://github.com/apache/incubator-apisix/pull/1461) +- 默认只允许本地环回地址 127.0.0.1 访问 admin API 和 dashboard. [#1458](https://github.com/apache/incubator-apisix/pull/1458) + +### Plugin +- :sunrise: **新增 batch request 插件**. [#1388](https://github.com/apache/incubator-apisix/pull/1388) +- 实现完成 `sys logger` 插件. [#1414](https://github.com/apache/incubator-apisix/pull/1414) + + ## 1.2.0 1.2 版本在内核以及插件上带来了非常多的更新。 diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000000..732f5ae2eb464 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,127 @@ + + +*The following is copied for your convenience from . 
If there's a discrepancy between the two, let us know or submit a PR to fix it.* + +# Code of Conduct # + +## Introduction ## + +This code of conduct applies to all spaces managed by the Apache +Software Foundation, including IRC, all public and private mailing +lists, issue trackers, wikis, blogs, Twitter, and any other +communication channel used by our communities. A code of conduct which +is specific to in-person events (ie., conferences) is codified in the +published ASF anti-harassment policy. + +We expect this code of conduct to be honored by everyone who +participates in the Apache community formally or informally, or claims +any affiliation with the Foundation, in any Foundation-related +activities and especially when representing the ASF, in any role. + +This code __is not exhaustive or complete__. It serves to distill our +common understanding of a collaborative, shared environment and goals. +We expect it to be followed in spirit as much as in the letter, so that +it can enrich all of us and the technical communities in which we participate. + +## Specific Guidelines ## + +We strive to: + + +1. __Be open.__ We invite anyone to participate in our community. We preferably use public methods of communication for project-related messages, unless discussing something sensitive. This applies to messages for help or project-related support, too; not only is a public support request much more likely to result in an answer to a question, it also makes sure that any inadvertent mistakes made by people answering will be more easily detected and corrected. + +2. __Be `empathetic`, welcoming, friendly, and patient.__ We work together to resolve conflict, assume good intentions, and do our best to act in an empathetic fashion. We may all experience some frustration from time to time, but we do not allow frustration to turn into a personal attack. A community where people feel uncomfortable or threatened is not a productive one. 
We should be respectful when dealing with other community members as well as with people outside our community. + +3. __Be collaborative.__ Our work will be used by other people, and in turn we will depend on the work of others. When we make something for the benefit of the project, we are willing to explain to others how it works, so that they can build on the work to make it even better. Any decision we make will affect users and colleagues, and we take those consequences seriously when making decisions. + +4. __Be inquisitive.__ Nobody knows everything! Asking questions early avoids many problems later, so questions are encouraged, though they may be directed to the appropriate forum. Those who are asked should be responsive and helpful, within the context of our shared goal of improving Apache project code. + +5. __Be careful in the words that we choose.__ Whether we are participating as professionals or volunteers, we value professionalism in all interactions, and take responsibility for our own speech. Be kind to others. Do not insult or put down other participants. Harassment and other exclusionary behaviour are not acceptable. This includes, but is not limited to: + + * Violent threats or language directed against another person. + * Sexist, racist, or otherwise discriminatory jokes and language. + * Posting sexually explicit or violent material. + * Posting (or threatening to post) other people's personally identifying information ("doxing"). + * Sharing private content, such as emails sent privately or non-publicly, or unlogged forums such as IRC channel history. + * Personal insults, especially those using racist or sexist terms. + * Unwelcome sexual attention. + * Excessive or unnecessary profanity. + * Repeated harassment of others. In general, if someone asks you to stop, then stop. + * Advocating for, or encouraging, any of the above behaviour. + +6. __Be concise.__ Keep in mind that what you write once will be read by hundreds of persons. 
Writing a short email means people can understand the conversation as efficiently as possible. Short emails should always strive to be empathetic, welcoming, friendly and patient. When a long explanation is necessary, consider adding a summary.

+ + Try to bring new ideas to a conversation so that each mail adds something unique to the thread, keeping in mind that the rest of the thread still contains the other messages with arguments that have already been made. + + Try to stay on topic, especially in discussions that are already fairly large. + +7. __Step down considerately.__ Members of every project come and go. When somebody leaves or disengages from the project they should tell people they are leaving and take the proper steps to ensure that others can pick up where they left off. In doing so, they should remain respectful of those who continue to participate in the project and should not misrepresent the project's goals or achievements. Likewise, community members should respect any individual's choice to leave the project.

+ + +## Diversity Statement ## + +Apache welcomes and encourages participation by everyone. We are committed to being a community that everyone feels good about joining. Although we may not be able to satisfy everyone, we will always work to treat everyone well. + +No matter how you identify yourself or how others perceive you: we welcome you. Though no list can hope to be comprehensive, we explicitly honour diversity in: age, culture, ethnicity, genotype, gender identity or expression, language, national origin, neurotype, phenotype, political beliefs, profession, race, religion, sexual orientation, socioeconomic status, subculture and technical ability. + +Though we welcome people fluent in all languages, Apache development is conducted in English. + +Standards for behaviour in the Apache community are detailed in the Code of Conduct above. We expect participants in our community to meet these standards in all their interactions and to help others to do so as well. + +## Reporting Guidelines ## + +While this code of conduct should be adhered to by participants, we recognize that sometimes people may have a bad day, or be unaware of some of the guidelines in this code of conduct. When that happens, you may reply to them and point out this code of conduct. Such messages may be in public or in private, whatever is most appropriate. However, regardless of whether the message is public or not, it should still adhere to the relevant parts of this code of conduct; in particular, it should not be abusive or disrespectful. + +If you believe someone is violating this code of conduct, you may reply to +them and point out this code of conduct. Such messages may be in public or in +private, whatever is most appropriate. Assume good faith; it is more likely +that participants are unaware of their bad behaviour than that they +intentionally try to degrade the quality of the discussion. 
Should there be +difficulties in dealing with the situation, you may report your compliance +issues in confidence to either: + + * President of the Apache Software Foundation: Sam Ruby (rubys at intertwingly dot net) + +or one of our volunteers: + + * [Mark Thomas](http://home.apache.org/~markt/coc.html) + * [Joan Touzet](http://home.apache.org/~wohali/) + * [Sharan Foga](http://home.apache.org/~sharan/coc.html) + +If the violation is in documentation or code, for example inappropriate pronoun usage or word choice within official documentation, we ask that people report these privately to the project in question at private@project.apache.org, and, if they have sufficient ability within the project, to resolve or remove the concerning material, being mindful of the perspective of the person originally reporting the issue. + + +## End Notes ## + +This Code defines __empathy__ as "a vicarious participation in the emotions, ideas, or opinions of others; the ability to imagine oneself in the condition or predicament of another." __Empathetic__ is the adjectival form of empathy. + + +This statement thanks the following, on which it draws for content and inspiration: + + + * [CouchDB Project Code of conduct](http://couchdb.apache.org/conduct.html) + * [Fedora Project Code of Conduct](http://fedoraproject.org/code-of-conduct) + * [Speak Up! 
Code of Conduct](http://speakup.io/coc.html) + * [Django Code of Conduct](https://www.djangoproject.com/conduct/) + * [Debian Code of Conduct](http://www.debian.org/vote/2014/vote_002) + * [Twitter Open Source Code of Conduct](https://github.com/twitter/code-of-conduct/blob/master/code-of-conduct.md) + * [Mozilla Code of Conduct/Draft](https://wiki.mozilla.org/Code_of_Conduct/Draft#Conflicts_of_Interest) + * [Python Diversity Appendix](https://www.python.org/community/diversity/) + * [Python Mentors Home Page](http://pythonmentors.com/) diff --git a/CODE_STYLE.md b/CODE_STYLE.md deleted file mode 100644 index 8b7a4ca2ef659..0000000000000 --- a/CODE_STYLE.md +++ /dev/null @@ -1,393 +0,0 @@ - - -# OpenResty Lua Coding Style Guide - -## indentation -Use 4 spaces as an indent in OpenResty, although Lua does not have such a grammar requirement. - -``` ---No -if a then -ngx.say("hello") -end -``` - -``` ---yes -if a then - ngx.say("hello") -end -``` - -You can simplify the operation by changing the tab to 4 spaces in the editor you are using. - -## Space -On both sides of the operator, you need to use a space to separate: - -``` ---No -local i=1 -local s = "apisix" -``` - -``` ---Yes -local i = 1 -local s = "apisix" -``` - -## Blank line -Many developers will bring the development habits of other languages to OpenResty, such as adding a semicolon at the end of the line. - -``` ---No -if a then -    ngx.say("hello"); -end; -``` - -Adding a semicolon will make the Lua code look ugly and unnecessary. Also, don't want to save the number of lines in the code, the latter turns the multi-line code into one line in order to appear "simple". 
This will not know when the positioning error is in the end of the code: - -``` ---No -if a then ngx.say("hello") end -``` - -``` ---yes -if a then - ngx.say("hello") -end -``` - -The functions needs to be separated by two blank lines: -``` ---No -local function foo() -end -local function bar() -end -``` - -``` ---Yes -local function foo() -end - - -local function bar() -end -``` - -If there are multiple if elseif branches, they need a blank line to separate them: -``` ---No -if a == 1 then - foo() -elseif a== 2 then - bar() -elseif a == 3 then - run() -else - error() -end -``` - -``` ---Yes -if a == 1 then - foo() - -elseif a== 2 then - bar() - -elseif a == 3 then - run() - -else - error() -end -``` - -## Maximum length per line -Each line cannot exceed 80 characters. If it exceeds, you need to wrap and align: - -``` ---No -return limit_conn_new("plugin-limit-conn", conf.conn, conf.burst, conf.default_conn_delay) -``` - -``` ---Yes -return limit_conn_new("plugin-limit-conn", conf.conn, conf.burst, - conf.default_conn_delay) -``` - -When the linefeed is aligned, the correspondence between the upper and lower lines should be reflected. For the example above, the parameters of the second line of functions are to the right of the left parenthesis of the first line. - -If it is a string stitching alignment, you need to put `..` in the next line: -``` ---No -return limit_conn_new("plugin-limit-conn" .. "plugin-limit-conn" .. - "plugin-limit-conn") -``` - -``` ---Yes -return limit_conn_new("plugin-limit-conn" .. "plugin-limit-conn" - .. "plugin-limit-conn") -``` - -``` ---Yes -return "param1", "plugin-limit-conn" - .. 
"plugin-limit-conn") -``` - -## Variable -Local variables should always be used, not global variables: -``` ---No -i = 1 -s = "apisix" -``` - -``` ---Yes -local i = 1 -local s = "apisix" -``` - -Variable naming uses the `snake_case` style: -``` ---No -local IndexArr = 1 -local str_Name = "apisix" -``` - -``` ---Yes -local index_arr = 1 -local str_name = "apisix" -``` - -Use all capitalization for constants: -``` ---No -local max_int = 65535 -local server_name = "apisix" -``` - -``` ---Yes -local MAX_INT = 65535 -local SERVER_NAME = "apisix" -``` - -## Table -Use `table.new` to pre-allocate the table: -``` ---No -local t = {} -for i = 1, 100 do - t[i] = i -end -``` - -``` ---Yes -local new_tab = require "table.new" -local t = new_tab(100, 0) -for i = 1, 100 do - t[i] = i -end -``` - -Don't use `nil` in an array: -``` ---No -local t = {1, 2, nil, 3} -``` - -If you must use null values, use `ngx.null` to indicate: -``` ---Yes -local t = {1, 2, ngx.null, 3} -``` - -## String -Do not splicing strings on the hot code path: -``` ---No -local s = "" -for i = 1, 100000 do - s = s .. 
"a" -end -``` - -``` ---Yes -local t = {} -for i = 1, 100000 do - t[i] = "a" -end -local s = table.concat(t, "") -``` - -## Function -The naming of functions also follows `snake_case`: -``` ---No -local function testNginx() -end -``` - -``` ---Yes -local function test_nginx() -end -``` - -The function should return as early as possible: -``` ---No -local function check(age, name) - local ret = true - if age < 20 then - ret = false - end - - if name == "a" then - ret = false - end - -- do something else - return ret -end -``` - -``` ---Yes -local function check(age, name) - if age < 20 then - return false - end - - if name == "a" then - return false - end - -- do something else - return true -end -``` - -## Module -All require libraries must be localized: -``` ---No -local function foo() - local ok, err = ngx.timer.at(delay, handler) -end -``` - -``` ---Yes -local timer_at = ngx.timer.at - -local function foo() - local ok, err = timer_at(delay, handler) -end -``` - -For style unification, `require` and `ngx` also need to be localized: -``` ---No -local core = require("apisix.core") -local timer_at = ngx.timer.at - -local function foo() - local ok, err = timer_at(delay, handler) -end -``` - -``` ---Yes -local ngx = ngx -local require = require -local core = require("apisix.core") -local timer_at = ngx.timer.at - -local function foo() - local ok, err = timer_at(delay, handler) -end -``` - -## Error handling -For functions that return with error information, the error information must be judged and processed: -``` ---No -local sock = ngx.socket.tcp() -local ok = sock:connect("www.google.com", 80) -ngx.say("successfully connected to google!") -``` - -``` ---Yes -local sock = ngx.socket.tcp() -local ok, err = sock:connect("www.google.com", 80) -if not ok then - ngx.say("failed to connect to google: ", err) - return -end -ngx.say("successfully connected to google!") -``` - -The function you wrote yourself, the error message is to be returned as a second parameter in the 
form of a string: -``` ---No -local function foo() - local ok, err = func() - if not ok then - return false - end - return true -end -``` - -``` ---No -local function foo() - local ok, err = func() - if not ok then - return false, {msg = err} - end - return true -end -``` - -``` ---Yes -local function foo() - local ok, err = func() - if not ok then - return false, "failed to call func(): " .. err - end - return true -end -``` diff --git a/FAQ.md b/FAQ.md index 4b56fce0355d0..dbf365e1f5c0f 100644 --- a/FAQ.md +++ b/FAQ.md @@ -71,14 +71,14 @@ Run the `luarocks config rocks_servers` command(this command is supported after If using a proxy doesn't solve this problem, you can add `--verbose` option during installation to see exactly how slow it is. Excluding the first case, only the second that the `git` protocol is blocked. Then we can run `git config --global url."https://".insteadOf git://` to using the 'HTTPS' protocol instead of `git`. -## How to support A/B testing via APISIX? +## How to support gray release via APISIX? -An example, if you want to group by the request param `arg_id`: +An example, `foo.com/product/index.html?id=204&page=2`, gray release based on `id` in the query string in uri as a condition: -1. Group A:arg_id <= 1000 -2. Group B:arg_id > 1000 +1. Group A:id <= 1000 +2. Group B:id > 1000 -here is the way: +here is the way: ```shell curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { @@ -107,11 +107,95 @@ curl -i http://127.0.0.1:9080/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335 }' ``` + Here is the operator list of current `lua-resty-radixtree`: https://github.com/iresty/lua-resty-radixtree#operator-list +## How to redirect http to https via APISIX? + +An example, redirect `http://foo.com` to `https://foo.com` + +There are several different ways to do this. +1. 
Directly use the `http_to_https` in `redirect` plugin: +```shell +curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/hello", + "host": "foo.com", + "plugins": { + "redirect": { + "http_to_https": true + } + } +}' +``` + +2. Use with advanced routing rule `vars` with `redirect` plugin: + +```shell +curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/hello", + "host": "foo.com", + "vars": [ + [ + "scheme", + "==", + "http" + ] + ], + "plugins": { + "redirect": { + "uri": "https://$host$request_uri", + "ret_code": 301 + } + } +}' +``` + +3. `serverless` plugin: + +```shell +curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/hello", + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions": ["return function() if ngx.var.scheme == \"http\" and ngx.var.host == \"foo.com\" then ngx.header[\"Location\"] = \"https://foo.com\" .. ngx.var.request_uri; ngx.exit(ngx.HTTP_MOVED_PERMANENTLY); end; end"] + } + } +}' +``` + +Then test it to see if it works: +```shell +curl -i -H 'Host: foo.com' http://127.0.0.1:9080/hello +``` + +The response body should be: +``` +HTTP/1.1 301 Moved Permanently +Date: Mon, 18 May 2020 02:56:04 GMT +Content-Type: text/html +Content-Length: 166 +Connection: keep-alive +Location: https://foo.com/hello +Server: APISIX web server + + +301 Moved Permanently + +

+<center><h1>301 Moved Permanently</h1></center>
+<hr><center>openresty</center>
+ + +``` + + ## How to fix OpenResty Installation Failure on MacOS 10.15 When you install the OpenResty on MacOs 10.15, you may face this error + ```shell > brew install openresty Updating Homebrew... @@ -172,3 +256,17 @@ Steps: 2. Restart APISIX Now you can trace the info level log in logs/error.log. + +## How to reload your own plugin + +The Apache APISIX plugin supports hot reloading. If your APISIX node has the Admin API turned on, then for scenarios such as adding / deleting / modifying plugins, you can hot reload the plugin by calling the HTTP interface without restarting the service. + +```shell +curl http://127.0.0.1:9080/apisix/admin/plugins/reload -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT +``` + +If your APISIX node does not open the Admin API, then you can manually load the plug-in by reloading APISIX. + +```shell +apisix reload +``` diff --git a/FAQ_CN.md b/FAQ_CN.md index 8d1e52824b514..334fc693c7397 100644 --- a/FAQ_CN.md +++ b/FAQ_CN.md @@ -45,7 +45,7 @@ APISIX 是当前性能最好的 API 网关,单核 QPS 达到 2.3 万,平均 当然可以,APISIX 提供了灵活的自定义插件,方便开发者和企业编写自己的逻辑。 -[如何开发插件](doc/plugin-develop-cn.md) +[如何开发插件](doc/zh-cn/plugin-develop.md) ## 我们为什么选择 etcd 作为配置中心? @@ -73,14 +73,15 @@ luarocks 服务。 运行 `luarocks config rocks_servers` 命令(这个命令 如果使用代理仍然解决不了这个问题,那可以在安装的过程中添加 `--verbose` 选项来查看具体是慢在什么地方。排除前面的 第一种情况,只可能是第二种,`git` 协议被封。这个时候可以执行 `git config --global url."https://".insteadOf git://` 命令使用 `https` 协议替代。 -## 如何通过APISIX支持A/B测试? +## 如何通过 APISIX 支持灰度发布? -比如,根据入参`arg_id`分组: +比如,`foo.com/product/index.html?id=204&page=2`, 根据 uri 中 query string 中的 `id` 作为条件来灰度发布: -1. A组:arg_id <= 1000 -2. B组:arg_id > 1000 +1. A组:id <= 1000 +2. 
B组:id > 1000 可以这么做: + ```shell curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { @@ -109,9 +110,91 @@ curl -i http://127.0.0.1:9080/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335 }' ``` + 更多的 lua-resty-radixtree 匹配操作,可查看操作列表: https://github.com/iresty/lua-resty-radixtree#operator-list +## 如何支持 http 自动跳转到 https? + +比如,将 `http://foo.com` 重定向到 `https://foo.com` + +有几种不同的方法来实现: +1. 直接使用 `redirect` 插件的 `http_to_https` 功能: +```shell +curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/hello", + "host": "foo.com", + "plugins": { + "redirect": { + "http_to_https": true + } + } +}' +``` + +2. 结合高级路由规则 `vars` 和 `redirect` 插件一起使用: + +```shell +curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/hello", + "host": "foo.com", + "vars": [ + [ + "scheme", + "==", + "http" + ] + ], + "plugins": { + "redirect": { + "uri": "https://$host$request_uri", + "ret_code": 301 + } + } +}' +``` + +3. 使用`serverless`插件: + +```shell +curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/hello", + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions": ["return function() if ngx.var.scheme == \"http\" and ngx.var.host == \"foo.com\" then ngx.header[\"Location\"] = \"https://foo.com\" .. ngx.var.request_uri; ngx.exit(ngx.HTTP_MOVED_PERMANENTLY); end; end"] + } + } +}' +``` + +然后测试下是否生效: +```shell +curl -i -H 'Host: foo.com' http://127.0.0.1:9080/hello +``` + +响应体应该是: +``` +HTTP/1.1 301 Moved Permanently +Date: Mon, 18 May 2020 02:56:04 GMT +Content-Type: text/html +Content-Length: 166 +Connection: keep-alive +Location: https://foo.com/hello +Server: APISIX web server + + +301 Moved Permanently + +

+<center><h1>301 Moved Permanently</h1></center>
+<hr><center>openresty</center>
+ + +``` + ## 如何修改日志等级 默认的APISIX日志等级为`warn`,如果需要查看`core.log.info`的打印结果需要将日志等级调整为`info`。 @@ -123,3 +206,17 @@ https://github.com/iresty/lua-resty-radixtree#operator-list 2、重启APISIX 之后便可以在logs/error.log中查看到info的日志了。 + +## 如何加载自己编写的插件 + +Apache APISIX 的插件支持热加载,如果你的 APISIX 节点打开了 Admin API,那么对于新增/删除/修改插件等场景,均可以通过调用 HTTP 接口的方式热加载插件,不需要重启服务。 + +```shell +curl http://127.0.0.1:9080/apisix/admin/plugins/reload -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT +``` + +如果你的 APISIX 节点并没有打开 Admin API,那么你可以通过手动 reload APISIX 的方式加载插件。 + +```shell +apisix reload +``` diff --git a/Makefile b/Makefile index 1ecc3074276a8..5836e3690988a 100644 --- a/Makefile +++ b/Makefile @@ -79,9 +79,13 @@ init: default ### run: Start the apisix server .PHONY: run run: default +ifeq ("$(wildcard logs/nginx.pid)", "") mkdir -p logs mkdir -p /tmp/apisix_cores/ $(OR_EXEC) -p $$PWD/ -c $$PWD/conf/nginx.conf +else + @echo "APISIX is running..." +endif ### stop: Stop the apisix server @@ -110,7 +114,7 @@ reload: default ### install: Install the apisix .PHONY: install -install: +install: default $(INSTALL) -d /usr/local/apisix/ $(INSTALL) -d /usr/local/apisix/logs/ $(INSTALL) -d /usr/local/apisix/conf/cert @@ -124,6 +128,9 @@ install: $(INSTALL) -d $(INST_LUADIR)/apisix/admin $(INSTALL) apisix/admin/*.lua $(INST_LUADIR)/apisix/admin/ + $(INSTALL) -d $(INST_LUADIR)/apisix/balancer + $(INSTALL) apisix/balancer/*.lua $(INST_LUADIR)/apisix/balancer/ + $(INSTALL) -d $(INST_LUADIR)/apisix/core $(INSTALL) apisix/core/*.lua $(INST_LUADIR)/apisix/core/ @@ -133,6 +140,9 @@ install: $(INSTALL) -d $(INST_LUADIR)/apisix/http/router $(INSTALL) apisix/http/router/*.lua $(INST_LUADIR)/apisix/http/router/ + $(INSTALL) -d $(INST_LUADIR)/apisix/discovery + $(INSTALL) apisix/discovery/*.lua $(INST_LUADIR)/apisix/discovery/ + $(INSTALL) -d $(INST_LUADIR)/apisix/plugins $(INSTALL) apisix/plugins/*.lua $(INST_LUADIR)/apisix/plugins/ @@ -148,6 +158,9 @@ install: $(INSTALL) -d $(INST_LUADIR)/apisix/plugins/zipkin $(INSTALL) 
apisix/plugins/zipkin/*.lua $(INST_LUADIR)/apisix/plugins/zipkin/ + $(INSTALL) -d $(INST_LUADIR)/apisix/plugins/skywalking + $(INSTALL) apisix/plugins/skywalking/*.lua $(INST_LUADIR)/apisix/plugins/skywalking/ + $(INSTALL) -d $(INST_LUADIR)/apisix/stream/plugins $(INSTALL) apisix/stream/plugins/*.lua $(INST_LUADIR)/apisix/stream/plugins/ diff --git a/README.md b/README.md index a0102b429f025..3572484d373be 100644 --- a/README.md +++ b/README.md @@ -39,8 +39,6 @@ APISIX is a cloud-based microservices API gateway that handles traditional north APISIX provides dynamic load balancing, authentication, rate limiting, other plugins through plugin mechanisms, and supports plugins you develop yourself. -For more detailed information, see the [White Paper](https://www.iresty.com/download/Choosing%20the%20Right%20Microservice%20API%20Gateway%20for%20the%20Enterprise%20User.pdf). - ![](doc/images/apisix.png) ## Features @@ -50,7 +48,7 @@ A/B testing, canary release, blue-green deployment, limit rate, defense against - **All platforms** - Cloud-Native: Platform agnostic, No vendor lock-in, APISIX can run from bare-metal to Kubernetes. - Run Environment: Both OpenResty and Tengine are supported. - - Supports [ARM64](https://zhuanlan.zhihu.com/p/84467919): Don't worry about the lock-in of the infra technology. + - Supports ARM64: Don't worry about the lock-in of the infra technology. - **Multi protocols** - [TCP/UDP Proxy](doc/stream-proxy.md): Dynamic TCP/UDP proxy. @@ -72,6 +70,7 @@ A/B testing, canary release, blue-green deployment, limit rate, defense against - Hash-based Load Balancing: Load balance with consistent hashing sessions. - [Health Checks](doc/health-check.md): Enable health check on the upstream node, and will automatically filter unhealthy nodes during load balancing to ensure system stability. - Circuit-Breaker: Intelligent tracking of unhealthy upstream services. 
+ - [Dynamic service discovery](doc/discovery.md):Support service discovery based on registry, reduce the reverse proxy maintenance costs. - **Fine-grained routing** - [Supports full path matching and prefix matching](doc/router-radixtree.md#how-to-use-libradixtree-in-apisix) @@ -79,7 +78,7 @@ A/B testing, canary release, blue-green deployment, limit rate, defense against - Support [various operators as judgment conditions for routing](https://github.com/iresty/lua-resty-radixtree#operator-list), for example `{"arg_age", ">", 24}` - Support [custom route matching function](https://github.com/iresty/lua-resty-radixtree/blob/master/t/filter-fun.t#L10) - IPv6: Use IPv6 to match route. - - Support [TTL](doc/admin-api-cn.md#route) + - Support [TTL](doc/zh-cn/admin-api.md#route) - [Support priority](doc/router-radixtree.md#3-match-priority) - [Support Batch Http Requests](doc/plugins/batch-requests.md) @@ -91,10 +90,11 @@ A/B testing, canary release, blue-green deployment, limit rate, defense against - [Limit-count](doc/plugins/limit-count.md) - [Limit-concurrency](doc/plugins/limit-conn.md) - Anti-ReDoS(Regular expression Denial of Service): Built-in policies to Anti ReDoS without configuration. - - [CORS](doc/plugins/cors.md) + - [CORS](doc/plugins/cors.md) Enable CORS(Cross-origin resource sharing) for your API. + - [uri-blocker](plugins/uri-blocker.md): Block client request by URI. - **OPS friendly** - - OpenTracing: [support Apache Skywalking and Zipkin](doc/plugins/zipkin.md) + - OpenTracing: support [Apache Skywalking](doc/plugins/skywalking.md) and [Zipkin](doc/plugins/zipkin.md) - Monitoring And Metrics: [Prometheus](doc/plugins/prometheus.md) - Clustering: APISIX nodes are stateless, creates clustering of the configuration center, please refer to [etcd Clustering Guide](https://github.com/etcd-io/etcd/blob/master/Documentation/op-guide/clustering.md). - High availability: support to configure multiple etcd addresses in the same cluster. 
@@ -106,7 +106,6 @@ A/B testing, canary release, blue-green deployment, limit rate, defense against - High performance: The single-core QPS reaches 18k with an average delay of less than 0.2 milliseconds. - [Fault Injection](doc/plugins/fault-injection.md) - [REST Admin API](doc/admin-api.md): Using the REST Admin API to control Apache APISIX, which only allows 127.0.0.1 access by default, you can modify the `allow_admin` field in `conf/config.yaml` to specify a list of IPs that are allowed to call the Admin API. Also note that the Admin API uses key auth to verify the identity of the caller. **The `admin_key` field in `conf/config.yaml` needs to be modified before deployment to ensure security**. - - [Python SDK](https://github.com/api7/apache-apisix-python-sdk) - External Loggers: Export access logs to external log management tools. ([HTTP Logger](doc/plugins/http-logger.md), [TCP Logger](doc/plugins/tcp-logger.md), [Kafka Logger](doc/plugins/kafka-logger.md), [UDP Logger](doc/plugins/udp-logger.md)) - **Highly scalable** @@ -121,7 +120,7 @@ APISIX Installed and tested in the following systems(OpenResty MUST >= 1.15.8.1, CentOS 7, Ubuntu 16.04, Ubuntu 18.04, Debian 9, Debian 10, macOS, **ARM64** Ubuntu 18.04 Steps to install APISIX: -1. Installation runtime dependencies: OpenResty and etcd, refer to [documentation](doc/install-dependencies.md) +1. Installation runtime dependencies: Nginx and etcd, refer to [documentation](doc/install-dependencies.md) 2. There are several ways to install Apache APISIX: - [Source Release](doc/how-to-build.md#installation-via-source-release) - [RPM package](doc/how-to-build.md#installation-via-rpm-package-centos-7) for CentOS 7 @@ -171,8 +170,6 @@ Do not need to fill the user name and password, log in directly. The dashboard only allows 127.0.0.1 by default, and you can modify `allow_admin` in `conf/config.yaml` by yourself, to list the list of IPs allowed to access. 
-We provide an online dashboard [demo version](http://apisix.iresty.com), make it easier for you to understand APISIX. - ## Benchmark Using AWS's 8 core server, APISIX's QPS reach to 140,000 with a latency of only 0.2 ms. diff --git a/README_CN.md b/README_CN.md index d3e79098cbceb..408fc2227a1b0 100644 --- a/README_CN.md +++ b/README_CN.md @@ -29,7 +29,7 @@ APISIX 是一个云原生、高性能、可扩展的微服务 API 网关。 -它是基于 OpenResty 和 etcd 来实现,和传统 API 网关相比,APISIX 具备动态路由和插件热加载,特别适合微服务体系下的 API 管理。 +它是基于 Nginx 和 etcd 来实现,和传统 API 网关相比,APISIX 具备动态路由和插件热加载,特别适合微服务体系下的 API 管理。 ## 为什么选择 APISIX? @@ -39,8 +39,6 @@ APISIX 是基于云原生的微服务 API 网关,它是所有业务流量的 APISIX 通过插件机制,提供动态负载平衡、身份验证、限流限速等功能,并且支持你自己开发的插件。 -更多详细的信息,可以查阅[ APISIX 的白皮书](https://www.iresty.com/download/%E4%BC%81%E4%B8%9A%E7%94%A8%E6%88%B7%E5%A6%82%E4%BD%95%E9%80%89%E6%8B%A9%E5%BE%AE%E6%9C%8D%E5%8A%A1%20API%20%E7%BD%91%E5%85%B3.pdf) - ![](doc/images/apisix.png) ## 功能 @@ -50,28 +48,29 @@ A/B 测试、金丝雀发布(灰度发布)、蓝绿部署、限流限速、抵 - **全平台** - 云原生: 平台无关,没有供应商锁定,无论裸机还是 Kubernetes,APISIX 都可以运行。 - 运行环境: OpenResty 和 Tengine 都支持。 - - 支持 [ARM64](https://zhuanlan.zhihu.com/p/84467919): 不用担心底层技术的锁定。 + - 支持 ARM64: 不用担心底层技术的锁定。 - **多协议** - - [TCP/UDP 代理](doc/stream-proxy-cn.md): 动态 TCP/UDP 代理。 - - [动态 MQTT 代理](doc/plugins/mqtt-proxy-cn.md): 支持用 `client_id` 对 MQTT 进行负载均衡,同时支持 MQTT [3.1.*](http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html) 和 [5.0](https://docs.oasis-open.org/mqtt/mqtt/v5.0/mqtt-v5.0.html) 两个协议标准。 - - [gRPC 代理](doc/grpc-proxy-cn.md):通过 APISIX 代理 gRPC 连接,并使用 APISIX 的大部分特性管理你的 gRPC 服务。 + - [TCP/UDP 代理](doc/zh-cn/stream-proxy.md): 动态 TCP/UDP 代理。 + - [动态 MQTT 代理](doc/zh-cn/plugins/mqtt-proxy.md): 支持用 `client_id` 对 MQTT 进行负载均衡,同时支持 MQTT [3.1.*](http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html) 和 [5.0](https://docs.oasis-open.org/mqtt/mqtt/v5.0/mqtt-v5.0.html) 两个协议标准。 + - [gRPC 代理](doc/zh-cn/grpc-proxy.md):通过 APISIX 代理 gRPC 连接,并使用 APISIX 的大部分特性管理你的 gRPC 服务。 - [gRPC 协议转换](doc/plugins/grpc-transcoding-cn.md):支持协议的转换,这样客户端可以通过 
HTTP/JSON 来访问你的 gRPC API。 - Websocket 代理 - Proxy Protocol - Dubbo 代理:基于 Tengine,可以实现 Dubbo 请求的代理。 - HTTP(S) 反向代理 - - [SSL](doc/https-cn.md):动态加载 SSL 证书。 + - [SSL](doc/zh-cn/https.md):动态加载 SSL 证书。 - **全动态能力** - - [热更新和热插件](doc/plugins-cn.md): 无需重启服务,就可以持续更新配置和插件。 - - [代理请求重写](doc/plugins/proxy-rewrite-cn.md): 支持重写请求上游的`host`、`uri`、`schema`、`enable_websocket`、`headers`信息。 - - [输出内容重写](doc/plugins/response-rewrite-cn.md): 支持自定义修改返回内容的 `status code`、`body`、`headers`。 - - [Serverless](doc/plugins/serverless-cn.md): 在 APISIX 的每一个阶段,你都可以添加并调用自己编写的函数。 + - [热更新和热插件](doc/zh-cn/plugins.md): 无需重启服务,就可以持续更新配置和插件。 + - [代理请求重写](doc/zh-cn/plugins/proxy-rewrite.md): 支持重写请求上游的`host`、`uri`、`schema`、`enable_websocket`、`headers`信息。 + - [输出内容重写](doc/zh-cn/plugins/response-rewrite.md): 支持自定义修改返回内容的 `status code`、`body`、`headers`。 + - [Serverless](doc/zh-cn/plugins/serverless.md): 在 APISIX 的每一个阶段,你都可以添加并调用自己编写的函数。 - 动态负载均衡:动态支持有权重的 round-robin 负载平衡。 - 支持一致性 hash 的负载均衡:动态支持一致性 hash 的负载均衡。 - [健康检查](doc/health-check.md):启用上游节点的健康检查,将在负载均衡期间自动过滤不健康的节点,以确保系统稳定性。 - 熔断器: 智能跟踪不健康上游服务。 + - [动态服务发现](doc/zh-cn/discovery.md):支持基于注册中心的服务发现功能,降低反向代理维护成本。 - **精细化路由** - [支持全路径匹配和前缀匹配](doc/router-radixtree.md#how-to-use-libradixtree-in-apisix) @@ -79,38 +78,38 @@ A/B 测试、金丝雀发布(灰度发布)、蓝绿部署、限流限速、抵 - 支持[各类操作符做为路由的判断条件](https://github.com/iresty/lua-resty-radixtree#operator-list),比如 `{"arg_age", ">", 24}` - 支持[自定义路由匹配函数](https://github.com/iresty/lua-resty-radixtree/blob/master/t/filter-fun.t#L10) - IPv6:支持使用 IPv6 格式匹配路由 - - 支持路由的[自动过期(TTL)](doc/admin-api-cn.md#route) + - 支持路由的[自动过期(TTL)](doc/zh-cn/admin-api.md#route) - [支持路由的优先级](doc/router-radixtree.md#3-match-priority) - - [支持批量 Http 请求](doc/plugins/batch-requests-cn.md) + - [支持批量 Http 请求](doc/zh-cn/plugins/batch-requests.md) - **安全防护** - - 多种身份认证方式: [key-auth](doc/plugins/key-auth-cn.md), [JWT](doc/plugins/jwt-auth-cn.md), [basic-auth](doc/plugins/basic-auth-cn.md), [wolf-rbac](doc/plugins/wolf-rbac-cn.md)。 - - [IP 黑白名单](doc/plugins/ip-restriction-cn.md) + 
- 多种身份认证方式: [key-auth](doc/zh-cn/plugins/key-auth.md), [JWT](doc/zh-cn/plugins/jwt-auth.md), [basic-auth](doc/zh-cn/plugins/basic-auth.md), [wolf-rbac](doc/zh-cn/plugins/wolf-rbac.md)。 + - [IP 黑白名单](doc/zh-cn/plugins/ip-restriction.md) - [IdP 支持](doc/plugins/oauth.md): 支持外部的身份认证服务,比如 Auth0,Okta,Authing 等,用户可以借此来对接 Oauth2.0 等认证方式。 - - [限制速率](doc/plugins/limit-req-cn.md) - - [限制请求数](doc/plugins/limit-count-cn.md) - - [限制并发](doc/plugins/limit-conn-cn.md) + - [限制速率](doc/zh-cn/plugins/limit-req.md) + - [限制请求数](doc/zh-cn/plugins/limit-count.md) + - [限制并发](doc/zh-cn/plugins/limit-conn.md) - 防御 ReDoS(正则表达式拒绝服务):内置策略,无需配置即可抵御 ReDoS。 - - [CORS](doc/plugins/cors-cn.md) + - [CORS](doc/zh-cn/plugins/cors.md):为你的API启用 CORS。 + - [uri-blocker](plugins/uri-blocker.md):根据 URI 拦截用户请求。 - **运维友好** - - OpenTracing 可观测性: [支持 Apache Skywalking 和 Zipkin](doc/plugins/zipkin-cn.md)。 - - 监控和指标: [Prometheus](doc/plugins/prometheus-cn.md) + - OpenTracing 可观测性: 支持 [Apache Skywalking](doc/zh-cn/plugins/skywalking.md) 和 [Zipkin](doc/zh-cn/plugins/zipkin.md)。 + - 监控和指标: [Prometheus](doc/zh-cn/plugins/prometheus.md) - 集群:APISIX 节点是无状态的,创建配置中心集群请参考 [etcd Clustering Guide](https://github.com/etcd-io/etcd/blob/master/Documentation/op-guide/clustering.md)。 - 高可用:支持配置同一个集群内的多个 etcd 地址。 - 控制台: 内置控制台来操作 APISIX 集群。 - 版本控制:支持操作的多次回滚。 - CLI: 使用命令行来启动、关闭和重启 APISIX。 - - [单机模式](doc/stand-alone-cn.md): 支持从本地配置文件中加载路由规则,在 kubernetes(k8s) 等环境下更友好。 - - [全局规则](doc/architecture-design-cn.md#Global-Rule):允许对所有请求执行插件,比如黑白名单、限流限速等。 + - [单机模式](doc/zh-cn/stand-alone.md): 支持从本地配置文件中加载路由规则,在 kubernetes(k8s) 等环境下更友好。 + - [全局规则](doc/zh-cn/architecture-design.md#Global-Rule):允许对所有请求执行插件,比如黑白名单、限流限速等。 - 高性能:在单核上 QPS 可以达到 18k,同时延迟只有 0.2 毫秒。 - - [故障注入](doc/plugins/fault-injection-cn.md) - - [REST Admin API](doc/admin-api-cn.md): 使用 REST Admin API 来控制 Apache APISIX,默认只允许 127.0.0.1 访问,你可以修改 `conf/config.yaml` 中的 `allow_admin` 字段,指定允许调用 Admin API 的 IP 列表。同时需要注意的是,Admin API 使用 key auth 来校验调用者身份,**在部署前需要修改 `conf/config.yaml` 中的 
`admin_key` 字段,来保证安全。** - - [Python SDK](https://github.com/api7/apache-apisix-python-sdk) + - [故障注入](doc/zh-cn/plugins/fault-injection.md) + - [REST Admin API](doc/zh-cn/admin-api.md): 使用 REST Admin API 来控制 Apache APISIX,默认只允许 127.0.0.1 访问,你可以修改 `conf/config.yaml` 中的 `allow_admin` 字段,指定允许调用 Admin API 的 IP 列表。同时需要注意的是,Admin API 使用 key auth 来校验调用者身份,**在部署前需要修改 `conf/config.yaml` 中的 `admin_key` 字段,来保证安全。** - 外部日志记录器:将访问日志导出到外部日志管理工具。([HTTP Logger](doc/plugins/http-logger.md), [TCP Logger](doc/plugins/tcp-logger.md), [Kafka Logger](doc/plugins/kafka-logger.md), [UDP Logger](doc/plugins/udp-logger.md)) - **高度可扩展** - - [自定义插件](doc/plugin-develop-cn.md): 允许挂载常见阶段,例如`init`, `rewrite`,`access`,`balancer`,`header filer`,`body filter` 和 `log` 阶段。 + - [自定义插件](doc/zh-cn/plugin-develop.md): 允许挂载常见阶段,例如`init`, `rewrite`,`access`,`balancer`,`header filer`,`body filter` 和 `log` 阶段。 - 自定义负载均衡算法:可以在 `balancer` 阶段使用自定义负载均衡算法。 - 自定义路由: 支持用户自己实现路由算法。 @@ -118,14 +117,14 @@ A/B 测试、金丝雀发布(灰度发布)、蓝绿部署、限流限速、抵 APISIX 在以下操作系统中可顺利安装并做过运行测试,需要注意的是:OpenResty 的版本必须 >= 1.15.8.1: -CentOS 7, Ubuntu 16.04, Ubuntu 18.04, Debian 9, Debian 10, macOS, **[ARM64](https://zhuanlan.zhihu.com/p/84467919)** Ubuntu 18.04 +CentOS 7, Ubuntu 16.04, Ubuntu 18.04, Debian 9, Debian 10, macOS, **ARM64** Ubuntu 18.04 安装 APISIX 的步骤: -1. 安装运行时依赖:OpenResty 和 etcd,参考[依赖安装文档](doc/install-dependencies.md) +1. 安装运行时依赖:OpenResty 和 etcd,参考[依赖安装文档](doc/zh-cn/install-dependencies.md) 2. 
有以下几种方式来安装 Apache APISIX: - - 通过[源码包安装](doc/how-to-build-cn.md#通过源码包安装); - - 如果你在使用 CentOS 7,可以使用 [RPM 包安装](doc/how-to-build-cn.md#通过-rpm-包安装centos-7); - - 其它 Linux 操作系统,可以使用 [Luarocks 安装方式](doc/how-to-build-cn.md#通过-luarocks-安装-不支持-macos); + - 通过[源码包安装](doc/zh-cn/how-to-build.md#通过源码包安装); + - 如果你在使用 CentOS 7,可以使用 [RPM 包安装](doc/zh-cn/how-to-build.md#通过-rpm-包安装centos-7); + - 其它 Linux 操作系统,可以使用 [Luarocks 安装方式](doc/zh-cn/how-to-build.md#通过-luarocks-安装-不支持-macos); - 你也可以使用 [Docker 镜像](https://github.com/apache/incubator-apisix-docker) 来安装。 ## 快速上手 @@ -138,9 +137,9 @@ sudo apisix start 2. 入门指南 -入门指南是学习 APISIX 基础知识的好方法。按照 [入门指南](doc/getting-started-cn.md)的步骤即可。 +入门指南是学习 APISIX 基础知识的好方法。按照 [入门指南](doc/zh-cn/getting-started.md)的步骤即可。 -更进一步,你可以跟着文档来尝试更多的[插件](doc/README_CN.md#插件)。 +更进一步,你可以跟着文档来尝试更多的[插件](doc/zh-cn/README.md#插件)。 ## 控制台 @@ -172,15 +171,13 @@ cp -r dist/* . Dashboard 默认只允许 127.0.0.1 访问。你可以自行修改 `conf/config.yaml` 中的 `allow_admin` 字段,指定允许访问 dashboard 的 IP 列表。 -我们部署了一个在线的 [Dashboard](http://apisix.iresty.com) ,方便你了解 APISIX。 - ## 性能测试 使用 AWS 的 8 核心服务器来压测 APISIX,QPS 可以达到 140000,同时延时只有 0.2 毫秒。 ## 文档 -[Apache APISIX 文档索引](doc/README_CN.md) +[Apache APISIX 文档索引](doc/zh-cn/README.md) ## Apache APISIX 和 Kong 的比较 @@ -241,7 +238,7 @@ Dashboard 默认只允许 127.0.0.1 访问。你可以自行修改 `conf/config. - [思必驰:为什么我们重新写了一个 k8s ingress controller?](https://mp.weixin.qq.com/s/bmm2ibk2V7-XYneLo9XAPQ) ## APISIX 的用户有哪些? 
-有很多公司和组织把 APISIX 用户学习、研究、生产环境和商业产品中,包括: +有很多公司和组织把 APISIX 用于学习、研究、生产环境和商业产品中,包括: diff --git a/apisix/admin/global_rules.lua b/apisix/admin/global_rules.lua index c74d7739d2cf6..a768012f99604 100644 --- a/apisix/admin/global_rules.lua +++ b/apisix/admin/global_rules.lua @@ -43,6 +43,8 @@ local function check_conf(id, conf, need_id) return nil, {error_msg = "wrong route id"} end + conf.id = id + core.log.info("schema: ", core.json.delay_encode(core.schema.global_rule)) core.log.info("conf : ", core.json.delay_encode(conf)) local ok, err = core.schema.check(core.schema.global_rule, conf) @@ -104,19 +106,19 @@ function _M.delete(id) end -function _M.patch(id, conf, sub_path) +function _M.patch(id, conf) if not id then return 400, {error_msg = "missing global rule id"} end - if not sub_path then - return 400, {error_msg = "missing sub-path"} - end - if not conf then return 400, {error_msg = "missing new configuration"} end + if type(conf) ~= "table" then + return 400, {error_msg = "invalid configuration"} + end + local key = "/global_rules/" .. id local res_old, err = core.etcd.get(key) if not res_old then @@ -131,32 +133,9 @@ function _M.patch(id, conf, sub_path) core.json.delay_encode(res_old, true)) local node_value = res_old.body.node.value - local sub_value = node_value - local sub_paths = core.utils.split_uri(sub_path) - for i = 1, #sub_paths - 1 do - local sub_name = sub_paths[i] - if sub_value[sub_name] == nil then - sub_value[sub_name] = {} - end - sub_value = sub_value[sub_name] + node_value = core.table.merge(node_value, conf); - if type(sub_value) ~= "table" then - return 400, "invalid sub-path: /" - .. core.table.concat(sub_paths, 1, i) - end - end - - if type(sub_value) ~= "table" then - return 400, "invalid sub-path: /" .. 
sub_path - end - - local sub_name = sub_paths[#sub_paths] - if sub_name and sub_name ~= "" then - sub_value[sub_name] = conf - else - node_value = conf - end core.log.info("new conf: ", core.json.delay_encode(node_value, true)) local ok, err = check_conf(id, node_value, true) diff --git a/apisix/admin/plugins.lua b/apisix/admin/plugins.lua index 7d6262c59c103..7b835e10ca436 100644 --- a/apisix/admin/plugins.lua +++ b/apisix/admin/plugins.lua @@ -18,9 +18,13 @@ local core = require("apisix.core") local local_plugins = require("apisix.plugin").plugins_hash local stream_local_plugins = require("apisix.plugin").stream_plugins_hash local pairs = pairs +local ipairs = ipairs local pcall = pcall local require = require local table_remove = table.remove +local table_sort = table.sort +local table_insert = table.insert + local _M = { version = 0.1, @@ -114,7 +118,23 @@ function _M.get_plugins_list() table_remove(plugins, 1) end - return plugins + local priorities = {} + local success = {} + for i, name in ipairs(plugins) do + local plugin_name = "apisix.plugins." .. name + local ok, plugin = pcall(require, plugin_name) + if ok and plugin.priority then + priorities[name] = plugin.priority + table_insert(success, name) + end + end + + local function cmp(x, y) + return priorities[x] > priorities[y] + end + + table_sort(success, cmp) + return success end diff --git a/apisix/admin/routes.lua b/apisix/admin/routes.lua index 3303e8dc0d0cf..2ce284be71c93 100644 --- a/apisix/admin/routes.lua +++ b/apisix/admin/routes.lua @@ -45,6 +45,8 @@ local function check_conf(id, conf, need_id) return nil, {error_msg = "wrong route id"} end + conf.id = id + core.log.info("schema: ", core.json.delay_encode(core.schema.route)) core.log.info("conf : ", core.json.delay_encode(conf)) local ok, err = core.schema.check(core.schema.route, conf) @@ -135,7 +137,7 @@ function _M.put(id, conf, sub_path, args) local key = "/routes/" .. 
id local res, err = core.etcd.set(key, conf, args.ttl) if not res then - core.log.error("failed to put route[", key, "]: ", err) + core.log.error("failed to put route[", key, "] to etcd: ", err) return 500, {error_msg = err} end @@ -151,7 +153,7 @@ function _M.get(id) local res, err = core.etcd.get(key) if not res then - core.log.error("failed to get route[", key, "]: ", err) + core.log.error("failed to get route[", key, "] from etcd: ", err) return 500, {error_msg = err} end @@ -169,7 +171,7 @@ function _M.post(id, conf, sub_path, args) -- core.log.info("key: ", key) local res, err = core.etcd.push("/routes", conf, args.ttl) if not res then - core.log.error("failed to post route[", key, "]: ", err) + core.log.error("failed to post route[", key, "] to etcd: ", err) return 500, {error_msg = err} end @@ -186,7 +188,7 @@ function _M.delete(id) -- core.log.info("key: ", key) local res, err = core.etcd.delete(key) if not res then - core.log.error("failed to delete route[", key, "]: ", err) + core.log.error("failed to delete route[", key, "] in etcd: ", err) return 500, {error_msg = err} end @@ -194,19 +196,19 @@ function _M.delete(id) end -function _M.patch(id, conf, sub_path, args) +function _M.patch(id, conf, args) if not id then return 400, {error_msg = "missing route id"} end - if not sub_path then - return 400, {error_msg = "missing sub-path"} - end - if not conf then return 400, {error_msg = "missing new configuration"} end + if type(conf) ~= "table" then + return 400, {error_msg = "invalid configuration"} + end + local key = "/routes" if id then key = key .. "/" .. 
id @@ -214,7 +216,7 @@ function _M.patch(id, conf, sub_path, args) local res_old, err = core.etcd.get(key) if not res_old then - core.log.error("failed to get route [", key, "]: ", err) + core.log.error("failed to get route [", key, "] in etcd: ", err) return 500, {error_msg = err} end @@ -224,33 +226,11 @@ function _M.patch(id, conf, sub_path, args) core.log.info("key: ", key, " old value: ", core.json.delay_encode(res_old, true)) - local node_value = res_old.body.node.value - local sub_value = node_value - local sub_paths = core.utils.split_uri(sub_path) - for i = 1, #sub_paths - 1 do - local sub_name = sub_paths[i] - if sub_value[sub_name] == nil then - sub_value[sub_name] = {} - end - sub_value = sub_value[sub_name] - - if type(sub_value) ~= "table" then - return 400, "invalid sub-path: /" - .. core.table.concat(sub_paths, 1, i) - end - end + local node_value = res_old.body.node.value - if type(sub_value) ~= "table" then - return 400, "invalid sub-path: /" .. sub_path - end + node_value = core.table.merge(node_value, conf); - local sub_name = sub_paths[#sub_paths] - if sub_name and sub_name ~= "" then - sub_value[sub_name] = conf - else - node_value = conf - end core.log.info("new conf: ", core.json.delay_encode(node_value, true)) local id, err = check_conf(id, node_value, true) @@ -261,7 +241,7 @@ function _M.patch(id, conf, sub_path, args) -- TODO: this is not safe, we need to use compare-set local res, err = core.etcd.set(key, node_value, args.ttl) if not res then - core.log.error("failed to set new route[", key, "]: ", err) + core.log.error("failed to set new route[", key, "] to etcd: ", err) return 500, {error_msg = err} end diff --git a/apisix/admin/services.lua b/apisix/admin/services.lua index e26ea41e63362..c10a215fd6102 100644 --- a/apisix/admin/services.lua +++ b/apisix/admin/services.lua @@ -20,7 +20,6 @@ local schema_plugin = require("apisix.admin.plugins").check_schema local upstreams = require("apisix.admin.upstreams") local tostring = tostring 
local ipairs = ipairs -local tonumber = tonumber local type = type @@ -47,6 +46,7 @@ local function check_conf(id, conf, need_id) return nil, {error_msg = "wrong service id"} end + conf.id = id core.log.info("schema: ", core.json.delay_encode(core.schema.service)) core.log.info("conf : ", core.json.delay_encode(conf)) @@ -55,7 +55,7 @@ local function check_conf(id, conf, need_id) return nil, {error_msg = "invalid configuration: " .. err} end - if need_id and not tonumber(id) then + if need_id and not id then return nil, {error_msg = "wrong type of service id"} end @@ -177,19 +177,19 @@ function _M.delete(id) end -function _M.patch(id, conf, sub_path) +function _M.patch(id, conf) if not id then return 400, {error_msg = "missing service id"} end - if not sub_path then - return 400, {error_msg = "missing sub-path"} - end - if not conf then return 400, {error_msg = "missing new configuration"} end + if type(conf) ~= "table" then + return 400, {error_msg = "invalid configuration"} + end + local key = "/services" .. "/" .. id local res_old, err = core.etcd.get(key) if not res_old then @@ -204,32 +204,9 @@ function _M.patch(id, conf, sub_path) core.json.delay_encode(res_old, true)) local new_value = res_old.body.node.value - local sub_value = new_value - local sub_paths = core.utils.split_uri(sub_path) - for i = 1, #sub_paths - 1 do - local sub_name = sub_paths[i] - if sub_value[sub_name] == nil then - sub_value[sub_name] = {} - end - sub_value = sub_value[sub_name] + new_value = core.table.merge(new_value, conf); - if type(sub_value) ~= "table" then - return 400, "invalid sub-path: /" - .. core.table.concat(sub_paths, 1, i) - end - end - - if type(sub_value) ~= "table" then - return 400, "invalid sub-path: /" .. 
sub_path - end - - local sub_name = sub_paths[#sub_paths] - if sub_name and sub_name ~= "" then - sub_value[sub_name] = conf - else - new_value = conf - end core.log.info("new value ", core.json.delay_encode(new_value, true)) local id, err = check_conf(id, new_value, true) diff --git a/apisix/admin/ssl.lua b/apisix/admin/ssl.lua index 898d9c1a988f3..6d9307d95d1da 100644 --- a/apisix/admin/ssl.lua +++ b/apisix/admin/ssl.lua @@ -14,10 +14,13 @@ -- See the License for the specific language governing permissions and -- limitations under the License. -- -local core = require("apisix.core") -local schema_plugin = require("apisix.admin.plugins").check_schema -local tostring = tostring - +local core = require("apisix.core") +local tostring = tostring +local aes = require "resty.aes" +local ngx_encode_base64 = ngx.encode_base64 +local str_find = string.find +local type = type +local assert = assert local _M = { version = 0.1, @@ -42,6 +45,8 @@ local function check_conf(id, conf, need_id) return nil, {error_msg = "wrong ssl id"} end + conf.id = id + core.log.info("schema: ", core.json.delay_encode(core.schema.ssl)) core.log.info("conf : ", core.json.delay_encode(conf)) local ok, err = core.schema.check(core.schema.ssl, conf) @@ -49,48 +54,31 @@ local function check_conf(id, conf, need_id) return nil, {error_msg = "invalid configuration: " .. err} end - local upstream_id = conf.upstream_id - if upstream_id then - local key = "/upstreams/" .. upstream_id - local res, err = core.etcd.get(key) - if not res then - return nil, {error_msg = "failed to fetch upstream info by " - .. "upstream id [" .. upstream_id .. "]: " - .. err} - end - - if res.status ~= 200 then - return nil, {error_msg = "failed to fetch upstream info by " - .. "upstream id [" .. upstream_id .. "], " - .. "response code: " .. res.status} - end - end + return need_id and id or true +end - local service_id = conf.service_id - if service_id then - local key = "/services/" .. 
service_id - local res, err = core.etcd.get(key) - if not res then - return nil, {error_msg = "failed to fetch service info by " - .. "service id [" .. service_id .. "]: " - .. err} - end - if res.status ~= 200 then - return nil, {error_msg = "failed to fetch service info by " - .. "service id [" .. service_id .. "], " - .. "response code: " .. res.status} - end +local function aes_encrypt(origin) + local local_conf = core.config.local_conf() + local iv + if local_conf and local_conf.apisix + and local_conf.apisix.ssl.key_encrypt_salt then + iv = local_conf.apisix.ssl.key_encrypt_salt end + local aes_128_cbc_with_iv = (type(iv)=="string" and #iv == 16) and + assert(aes:new(iv, nil, aes.cipher(128, "cbc"), {iv=iv})) or nil - if conf.plugins then - local ok, err = schema_plugin(conf.plugins) - if not ok then - return nil, {error_msg = err} + if aes_128_cbc_with_iv ~= nil and str_find(origin, "---") then + local encrypted = aes_128_cbc_with_iv:encrypt(origin) + if encrypted == nil then + core.log.error("failed to encrypt key[", origin, "] ") + return origin end + + return ngx_encode_base64(encrypted) end - return need_id and id or true + return origin end @@ -100,6 +88,9 @@ function _M.put(id, conf) return 400, err end + -- encrypt private key + conf.key = aes_encrypt(conf.key) + local key = "/ssl/" .. id local res, err = core.etcd.set(key, conf) if not res then @@ -138,6 +129,9 @@ function _M.post(id, conf) return 400, err end + -- encrypt private key + conf.key = aes_encrypt(conf.key) + local key = "/ssl" -- core.log.info("key: ", key) local res, err = core.etcd.push("/ssl", conf) @@ -167,4 +161,57 @@ function _M.delete(id) end +function _M.patch(id, conf) + if not id then + return 400, {error_msg = "missing route id"} + end + + if not conf then + return 400, {error_msg = "missing new configuration"} + end + + if type(conf) ~= "table" then + return 400, {error_msg = "invalid configuration"} + end + + local key = "/ssl" + if id then + key = key .. "/" .. 
id + end + + local res_old, err = core.etcd.get(key) + if not res_old then + core.log.error("failed to get ssl [", key, "] in etcd: ", err) + return 500, {error_msg = err} + end + + if res_old.status ~= 200 then + return res_old.status, res_old.body + end + core.log.info("key: ", key, " old value: ", + core.json.delay_encode(res_old, true)) + + + local node_value = res_old.body.node.value + + node_value = core.table.merge(node_value, conf); + + core.log.info("new ssl conf: ", core.json.delay_encode(node_value, true)) + + local id, err = check_conf(id, node_value, true) + if not id then + return 400, err + end + + -- TODO: this is not safe, we need to use compare-set + local res, err = core.etcd.set(key, node_value) + if not res then + core.log.error("failed to set new ssl[", key, "] to etcd: ", err) + return 500, {error_msg = err} + end + + return res.status, res.body +end + + return _M diff --git a/apisix/admin/stream_routes.lua b/apisix/admin/stream_routes.lua index e806da5e01d6b..969f775164e63 100644 --- a/apisix/admin/stream_routes.lua +++ b/apisix/admin/stream_routes.lua @@ -31,17 +31,19 @@ local function check_conf(id, conf, need_id) id = id or conf.id if need_id and not id then - return nil, {error_msg = "missing stream stream route id"} + return nil, {error_msg = "missing stream route id"} end if not need_id and id then - return nil, {error_msg = "wrong stream stream route id, do not need it"} + return nil, {error_msg = "wrong stream route id, do not need it"} end if need_id and conf.id and tostring(conf.id) ~= tostring(id) then - return nil, {error_msg = "wrong stream stream route id"} + return nil, {error_msg = "wrong stream route id"} end + conf.id = id + core.log.info("schema: ", core.json.delay_encode(core.schema.stream_route)) core.log.info("conf : ", core.json.delay_encode(conf)) local ok, err = core.schema.check(core.schema.stream_route, conf) @@ -129,7 +131,7 @@ end function _M.delete(id) if not id then - return 400, {error_msg = "missing stream 
stream route id"} + return 400, {error_msg = "missing stream route id"} end local key = "/stream_routes/" .. id diff --git a/apisix/admin/upstreams.lua b/apisix/admin/upstreams.lua index e989cd5527831..f09093ec8aae8 100644 --- a/apisix/admin/upstreams.lua +++ b/apisix/admin/upstreams.lua @@ -100,9 +100,7 @@ local function check_conf(id, conf, need_id) end -- let schema check id - if id and not conf.id then - conf.id = id - end + conf.id = id core.log.info("schema: ", core.json.delay_encode(core.schema.upstream)) core.log.info("conf : ", core.json.delay_encode(conf)) @@ -213,19 +211,19 @@ function _M.delete(id) end -function _M.patch(id, conf, sub_path) +function _M.patch(id, conf) if not id then return 400, {error_msg = "missing upstream id"} end - if not sub_path then - return 400, {error_msg = "missing sub-path"} - end - if not conf then return 400, {error_msg = "missing new configuration"} end + if type(conf) ~= "table" then + return 400, {error_msg = "invalid configuration"} + end + local key = "/upstreams" .. "/" .. id local res_old, err = core.etcd.get(key) if not res_old then @@ -240,32 +238,9 @@ function _M.patch(id, conf, sub_path) core.json.delay_encode(res_old, true)) local new_value = res_old.body.node.value - local sub_value = new_value - local sub_paths = core.utils.split_uri(sub_path) - for i = 1, #sub_paths - 1 do - local sub_name = sub_paths[i] - if sub_value[sub_name] == nil then - sub_value[sub_name] = {} - end - sub_value = sub_value[sub_name] + new_value = core.table.merge(new_value, conf); - if type(sub_value) ~= "table" then - return 400, "invalid sub-path: /" - .. core.table.concat(sub_paths, 1, i) - end - end - - if type(sub_value) ~= "table" then - return 400, "invalid sub-path: /" .. 
sub_path - end - - local sub_name = sub_paths[#sub_paths] - if sub_name and sub_name ~= "" then - sub_value[sub_name] = conf - else - new_value = conf - end core.log.info("new value ", core.json.delay_encode(new_value, true)) local id, err = check_conf(id, new_value, true) diff --git a/apisix/balancer.lua b/apisix/balancer.lua index a5134bcbd9280..1675128db0a81 100644 --- a/apisix/balancer.lua +++ b/apisix/balancer.lua @@ -14,23 +14,23 @@ -- See the License for the specific language governing permissions and -- limitations under the License. -- -local healthcheck = require("resty.healthcheck") -local roundrobin = require("resty.roundrobin") -local resty_chash = require("resty.chash") +local healthcheck +local require = require +local discovery = require("apisix.discovery.init").discovery local balancer = require("ngx.balancer") local core = require("apisix.core") -local error = error -local str_char = string.char -local str_gsub = string.gsub -local pairs = pairs +local ipairs = ipairs local tostring = tostring local set_more_tries = balancer.set_more_tries local get_last_failure = balancer.get_last_failure local set_timeouts = balancer.set_timeouts -local upstreams_etcd local module_name = "balancer" +local pickers = { + roundrobin = require("apisix.balancer.roundrobin"), + chash = require("apisix.balancer.chash"), +} local lrucache_server_picker = core.lrucache.new({ @@ -39,33 +39,43 @@ local lrucache_server_picker = core.lrucache.new({ local lrucache_checker = core.lrucache.new({ ttl = 300, count = 256 }) +local lrucache_addr = core.lrucache.new({ + ttl = 300, count = 1024 * 4 +}) local _M = { - version = 0.1, + version = 0.2, name = module_name, } local function fetch_health_nodes(upstream, checker) + local nodes = upstream.nodes if not checker then - return upstream.nodes + local new_nodes = core.table.new(0, #nodes) + for _, node in ipairs(nodes) do + -- TODO filter with metadata + new_nodes[node.host .. ":" .. 
node.port] = node.weight + end + return new_nodes end local host = upstream.checks and upstream.checks.host - local up_nodes = core.table.new(0, core.table.nkeys(upstream.nodes)) - - for addr, weight in pairs(upstream.nodes) do - local ip, port = core.utils.parse_addr(addr) - local ok = checker:get_target_status(ip, port, host) + local up_nodes = core.table.new(0, #nodes) + for _, node in ipairs(nodes) do + local ok = checker:get_target_status(node.host, node.port, host) if ok then - up_nodes[addr] = weight + -- TODO filter with metadata + up_nodes[node.host .. ":" .. node.port] = node.weight end end if core.table.nkeys(up_nodes) == 0 then core.log.warn("all upstream nodes is unhealth, use default") - up_nodes = upstream.nodes + for _, node in ipairs(nodes) do + up_nodes[node.host .. ":" .. node.port] = node.weight + end end return up_nodes @@ -73,18 +83,19 @@ end local function create_checker(upstream, healthcheck_parent) + if healthcheck == nil then + healthcheck = require("resty.healthcheck") + end local checker = healthcheck.new({ name = "upstream#" .. 
healthcheck_parent.key, shm_name = "upstream-healthcheck", checks = upstream.checks, }) - - for addr, weight in pairs(upstream.nodes) do - local ip, port = core.utils.parse_addr(addr) - local ok, err = checker:add_target(ip, port, upstream.checks.host) + for _, node in ipairs(upstream.nodes) do + local ok, err = checker:add_target(node.host, node.port, upstream.checks.host) if not ok then - core.log.error("failed to add new health check target: ", addr, - " err: ", err) + core.log.error("failed to add new health check target: ", node.host, ":", node.port, + " err: ", err) end end @@ -122,118 +133,60 @@ local function fetch_healthchecker(upstream, healthcheck_parent, version) end -local function fetch_chash_hash_key(ctx, upstream) - local key = upstream.key - local hash_on = upstream.hash_on or "vars" - local chash_key - - if hash_on == "consumer" then - chash_key = ctx.consumer_id - elseif hash_on == "vars" then - chash_key = ctx.var[key] - elseif hash_on == "header" then - chash_key = ctx.var["http_" .. key] - elseif hash_on == "cookie" then - chash_key = ctx.var["cookie_" .. 
key] - end - - if not chash_key then - chash_key = ctx.var["remote_addr"] - core.log.warn("chash_key fetch is nil, use default chash_key ", - "remote_addr: ", chash_key) - end - core.log.info("upstream key: ", key) - core.log.info("hash_on: ", hash_on) - core.log.info("chash_key: ", core.json.delay_encode(chash_key)) - - return chash_key -end - - local function create_server_picker(upstream, checker) - if upstream.type == "roundrobin" then + local picker = pickers[upstream.type] + if picker then local up_nodes = fetch_health_nodes(upstream, checker) core.log.info("upstream nodes: ", core.json.delay_encode(up_nodes)) - local picker = roundrobin:new(up_nodes) - return { - upstream = upstream, - get = function () - return picker:find() - end - } + return picker.new(up_nodes, upstream) end - if upstream.type == "chash" then - local up_nodes = fetch_health_nodes(upstream, checker) - core.log.info("upstream nodes: ", core.json.delay_encode(up_nodes)) - - local str_null = str_char(0) - - local servers, nodes = {}, {} - for serv, weight in pairs(up_nodes) do - local id = str_gsub(serv, ":", str_null) - - servers[id] = serv - nodes[id] = weight - end + return nil, "invalid balancer type: " .. upstream.type, 0 +end - local picker = resty_chash:new(nodes) - return { - upstream = upstream, - get = function (ctx) - local chash_key = fetch_chash_hash_key(ctx, upstream) - local id = picker:find(chash_key) - -- core.log.warn("chash id: ", id, " val: ", servers[id]) - return servers[id] - end - } - end - return nil, "invalid balancer type: " .. 
upstream.type, 0 +local function parse_addr(addr) + local host, port, err = core.utils.parse_addr(addr) + return {host = host, port = port}, err end local function pick_server(route, ctx) core.log.info("route: ", core.json.delay_encode(route, true)) core.log.info("ctx: ", core.json.delay_encode(ctx, true)) - local healthcheck_parent = route - local up_id = route.value.upstream_id - local up_conf = (route.dns_value and route.dns_value.upstream) - or route.value.upstream - if not up_id and not up_conf then - return nil, nil, "missing upstream configuration" + local up_conf = ctx.upstream_conf + if up_conf.service_name then + if not discovery then + return nil, "discovery is uninitialized" + end + up_conf.nodes = discovery.nodes(up_conf.service_name) end - local version - local key - - if up_id then - if not upstreams_etcd then - return nil, nil, "need to create a etcd instance for fetching " - .. "upstream information" - end + local nodes_count = up_conf.nodes and #up_conf.nodes or 0 + if nodes_count == 0 then + return nil, "no valid upstream node" + end - local up_obj = upstreams_etcd:get(tostring(up_id)) - if not up_obj then - return nil, nil, "failed to find upstream by id: " .. up_id + if up_conf.timeout then + local timeout = up_conf.timeout + local ok, err = set_timeouts(timeout.connect, timeout.send, + timeout.read) + if not ok then + core.log.error("could not set upstream timeouts: ", err) end - core.log.info("upstream: ", core.json.delay_encode(up_obj)) - - healthcheck_parent = up_obj - up_conf = up_obj.dns_value or up_obj.value - version = up_obj.modifiedIndex - key = up_conf.type .. "#upstream_" .. up_id - - else - version = ctx.conf_version - key = up_conf.type .. "#route_" .. 
route.value.id end - if core.table.nkeys(up_conf.nodes) == 0 then - return nil, nil, "no valid upstream node" + if nodes_count == 1 then + local node = up_conf.nodes[1] + ctx.balancer_ip = node.host + ctx.balancer_port = node.port + return node end + local healthcheck_parent = ctx.upstream_healthcheck_parent + local version = ctx.upstream_version + local key = ctx.upstream_key local checker = fetch_healthchecker(up_conf, healthcheck_parent, version) ctx.balancer_try_count = (ctx.balancer_try_count or 0) + 1 @@ -256,11 +209,10 @@ local function pick_server(route, ctx) if ctx.balancer_try_count == 1 then local retries = up_conf.retries - if retries and retries > 0 then - set_more_tries(retries) - else - set_more_tries(core.table.nkeys(up_conf.nodes)) + if not retries or retries <= 0 then + retries = #up_conf.nodes end + set_more_tries(retries) end if checker then @@ -270,45 +222,43 @@ local function pick_server(route, ctx) local server_picker = lrucache_server_picker(key, version, create_server_picker, up_conf, checker) if not server_picker then - return nil, nil, "failed to fetch server picker" + return nil, "failed to fetch server picker" end local server, err = server_picker.get(ctx) if not server then err = err or "no valid upstream node" - return nil, nil, "failed to find valid upstream server, " .. err + return nil, "failed to find valid upstream server, " .. 
err end - if up_conf.timeout then - local timeout = up_conf.timeout - local ok, err = set_timeouts(timeout.connect, timeout.send, - timeout.read) - if not ok then - core.log.error("could not set upstream timeouts: ", err) - end + local res, err = lrucache_addr(server, nil, parse_addr, server) + ctx.balancer_ip = res.host + ctx.balancer_port = res.port + -- core.log.info("proxy to ", host, ":", port) + if err then + core.log.error("failed to parse server addr: ", server, " err: ", err) + return core.response.exit(502) end - local ip, port, err = core.utils.parse_addr(server) - ctx.balancer_ip = ip - ctx.balancer_port = port - - return ip, port, err + return res end + + -- for test _M.pick_server = pick_server function _M.run(route, ctx) - local ip, port, err = pick_server(route, ctx) - if err then + local server, err = pick_server(route, ctx) + if not server then core.log.error("failed to pick server: ", err) return core.response.exit(502) end - local ok, err = balancer.set_current_peer(ip, port) + local ok, err = balancer.set_current_peer(server.host, server.port) if not ok then - core.log.error("failed to set server peer [", ip, ":", port, - "] err: ", err) + core.log.error("failed to set server peer [", server.host, ":", + server.port, "] err: ", err) return core.response.exit(502) end @@ -317,34 +267,6 @@ end function _M.init_worker() - local err - upstreams_etcd, err = core.config.new("/upstreams", { - automatic = true, - item_schema = core.schema.upstream, - filter = function(upstream) - upstream.has_domain = false - if not upstream.value then - return - end - - for addr, _ in pairs(upstream.value.nodes or {}) do - local host = core.utils.parse_addr(addr) - if not core.utils.parse_ipv4(host) and - not core.utils.parse_ipv6(host) then - upstream.has_domain = true - break - end - end - - core.log.info("filter upstream: ", - core.json.delay_encode(upstream)) - end, - }) - if not upstreams_etcd then - error("failed to create etcd instance for fetching upstream: " 
.. err) - return - end end - return _M diff --git a/apisix/balancer/chash.lua b/apisix/balancer/chash.lua new file mode 100644 index 0000000000000..38831cdb4e489 --- /dev/null +++ b/apisix/balancer/chash.lua @@ -0,0 +1,80 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +local core = require("apisix.core") +local resty_chash = require("resty.chash") +local str_char = string.char +local str_gsub = string.gsub +local pairs = pairs + + +local _M = {} + + +local function fetch_chash_hash_key(ctx, upstream) + local key = upstream.key + local hash_on = upstream.hash_on or "vars" + local chash_key + + if hash_on == "consumer" then + chash_key = ctx.consumer_id + elseif hash_on == "vars" then + chash_key = ctx.var[key] + elseif hash_on == "header" then + chash_key = ctx.var["http_" .. key] + elseif hash_on == "cookie" then + chash_key = ctx.var["cookie_" .. 
key] + end + + if not chash_key then + chash_key = ctx.var["remote_addr"] + core.log.warn("chash_key fetch is nil, use default chash_key ", + "remote_addr: ", chash_key) + end + core.log.info("upstream key: ", key) + core.log.info("hash_on: ", hash_on) + core.log.info("chash_key: ", core.json.delay_encode(chash_key)) + + return chash_key +end + + +function _M.new(up_nodes, upstream) + local str_null = str_char(0) + + local servers, nodes = {}, {} + for serv, weight in pairs(up_nodes) do + local id = str_gsub(serv, ":", str_null) + + servers[id] = serv + nodes[id] = weight + end + + local picker = resty_chash:new(nodes) + return { + upstream = upstream, + get = function (ctx) + local chash_key = fetch_chash_hash_key(ctx, upstream) + local id = picker:find(chash_key) + -- core.log.warn("chash id: ", id, " val: ", servers[id]) + return servers[id] + end + } +end + + +return _M diff --git a/apisix/balancer/roundrobin.lua b/apisix/balancer/roundrobin.lua new file mode 100644 index 0000000000000..dac4f03ea10d6 --- /dev/null +++ b/apisix/balancer/roundrobin.lua @@ -0,0 +1,34 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +local roundrobin = require("resty.roundrobin") + +local _M = {} + + +function _M.new(up_nodes, upstream) + local picker = roundrobin:new(up_nodes) + return { + upstream = upstream, + get = function () + return picker:find() + end + } +end + + +return _M diff --git a/apisix/core/config_etcd.lua b/apisix/core/config_etcd.lua index dd0a5487bac1f..fbb6df8dbf5d5 100644 --- a/apisix/core/config_etcd.lua +++ b/apisix/core/config_etcd.lua @@ -308,6 +308,7 @@ local function sync_data(self) key = short_key(self, self.values[i].key) self.values_hash[key] = i end + self.sync_times = 0 end self.conf_version = self.conf_version + 1 diff --git a/apisix/core/config_yaml.lua b/apisix/core/config_yaml.lua index 7803deccf4e16..eed861fafaf35 100644 --- a/apisix/core/config_yaml.lua +++ b/apisix/core/config_yaml.lua @@ -58,7 +58,10 @@ local mt = { local apisix_yaml local apisix_yaml_ctime -local function read_apisix_yaml(pre_mtime) +local function read_apisix_yaml(premature, pre_mtime) + if premature then + return + end local attributes, err = lfs.attributes(apisix_yaml_path) if not attributes then log.error("failed to fetch ", apisix_yaml_path, " attributes: ", err) diff --git a/apisix/core/table.lua b/apisix/core/table.lua index 0fc64acc34440..e666e162a2293 100644 --- a/apisix/core/table.lua +++ b/apisix/core/table.lua @@ -25,13 +25,14 @@ local type = type local _M = { - version = 0.1, + version = 0.2, new = new_tab, clear = require("table.clear"), nkeys = nkeys, insert = table.insert, concat = table.concat, clone = require("table.clone"), + isarray = require("table.isarray"), } @@ -84,5 +85,24 @@ local function deepcopy(orig) end _M.deepcopy = deepcopy +local ngx_null = ngx.null +local function merge(origin, extend) + for k,v in pairs(extend) do + if type(v) == "table" then + if type(origin[k] or false) == "table" then + merge(origin[k] or {}, extend[k] or {}) + else + origin[k] = v + end + elseif v == ngx_null then + origin[k] = nil + else + origin[k] = v + end + end + + 
return origin +end +_M.merge = merge return _M diff --git a/apisix/core/version.lua b/apisix/core/version.lua index dfd10502979b8..c3606c206e533 100644 --- a/apisix/core/version.lua +++ b/apisix/core/version.lua @@ -15,5 +15,5 @@ -- limitations under the License. -- return { - VERSION = "1.2" + VERSION = "1.4" } diff --git a/apisix/discovery/eureka.lua b/apisix/discovery/eureka.lua new file mode 100644 index 0000000000000..d4b4368536170 --- /dev/null +++ b/apisix/discovery/eureka.lua @@ -0,0 +1,253 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +local local_conf = require("apisix.core.config_local").local_conf() +local http = require("resty.http") +local core = require("apisix.core") +local ipmatcher = require("resty.ipmatcher") +local ipairs = ipairs +local tostring = tostring +local type = type +local math_random = math.random +local error = error +local ngx = ngx +local ngx_timer_at = ngx.timer.at +local ngx_timer_every = ngx.timer.every +local string_sub = string.sub +local string_find = string.find +local log = core.log + +local default_weight +local applications + +local schema = { + type = "object", + properties = { + host = { + type = "array", + minItems = 1, + items = { + type = "string", + }, + }, + fetch_interval = {type = "integer", minimum = 1, default = 30}, + prefix = {type = "string"}, + weight = {type = "integer", minimum = 0}, + timeout = { + type = "object", + properties = { + connect = {type = "integer", minimum = 1, default = 2000}, + send = {type = "integer", minimum = 1, default = 2000}, + read = {type = "integer", minimum = 1, default = 5000}, + } + }, + }, + required = {"host"} +} + + +local _M = { + version = 0.1, +} + + +local function service_info() + local host = local_conf.eureka and local_conf.eureka.host + if not host then + log.error("do not set eureka.host") + return + end + + local basic_auth + -- TODO Add health check to get healthy nodes. + local url = host[math_random(#host)] + local auth_idx = string_find(url, "@", 1, true) + if auth_idx then + local protocol_idx = string_find(url, "://", 1, true) + local protocol = string_sub(url, 1, protocol_idx + 2) + local user_and_password = string_sub(url, protocol_idx + 3, auth_idx - 1) + local other = string_sub(url, auth_idx + 1) + url = protocol .. other + basic_auth = "Basic " .. ngx.encode_base64(user_and_password) + end + if local_conf.eureka.prefix then + url = url .. local_conf.eureka.prefix + end + if string_sub(url, #url) ~= "/" then + url = url .. 
"/" + end + + return url, basic_auth +end + + +local function request(request_uri, basic_auth, method, path, query, body) + log.info("eureka uri:", request_uri, ".") + local url = request_uri .. path + local headers = core.table.new(0, 5) + headers['Connection'] = 'Keep-Alive' + headers['Accept'] = 'application/json' + + if basic_auth then + headers['Authorization'] = basic_auth + end + + if body and 'table' == type(body) then + local err + body, err = core.json.encode(body) + if not body then + return nil, 'invalid body : ' .. err + end + -- log.warn(method, url, body) + headers['Content-Type'] = 'application/json' + end + + local httpc = http.new() + local timeout = local_conf.eureka.timeout + local connect_timeout = timeout and timeout.connect or 2000 + local send_timeout = timeout and timeout.send or 2000 + local read_timeout = timeout and timeout.read or 5000 + log.info("connect_timeout:", connect_timeout, ", send_timeout:", send_timeout, + ", read_timeout:", read_timeout, ".") + httpc:set_timeouts(connect_timeout, send_timeout, read_timeout) + return httpc:request_uri(url, { + version = 1.1, + method = method, + headers = headers, + query = query, + body = body, + ssl_verify = false, + }) +end + + +local function parse_instance(instance) + local status = instance.status + local overridden_status = instance.overriddenstatus or instance.overriddenStatus + if overridden_status and overridden_status ~= "UNKNOWN" then + status = overridden_status + end + + if status ~= "UP" then + return + end + local port + if tostring(instance.port["@enabled"]) == "true" and instance.port["$"] then + port = instance.port["$"] + -- secure = false + end + if tostring(instance.securePort["@enabled"]) == "true" and instance.securePort["$"] then + port = instance.securePort["$"] + -- secure = true + end + local ip = instance.ipAddr + if not ipmatcher.parse_ipv4(ip) and + not ipmatcher.parse_ipv6(ip) then + log.error(instance.app, " service ", instance.hostName, " node IP ", ip, + " 
is invalid(must be IPv4 or IPv6).") + return + end + return ip, port, instance.metadata +end + + +local function fetch_full_registry(premature) + if premature then + return + end + + local request_uri, basic_auth = service_info() + if not request_uri then + return + end + + local res, err = request(request_uri, basic_auth, "GET", "apps") + if not res then + log.error("failed to fetch registry", err) + return + end + + if not res.body or res.status ~= 200 then + log.error("failed to fetch registry, status = ", res.status) + return + end + + local json_str = res.body + local data, err = core.json.decode(json_str) + if not data then + log.error("invalid response body: ", json_str, " err: ", err) + return + end + local apps = data.applications.application + local up_apps = core.table.new(0, #apps) + for _, app in ipairs(apps) do + for _, instance in ipairs(app.instance) do + local ip, port, metadata = parse_instance(instance) + if ip and port then + local nodes = up_apps[app.name] + if not nodes then + nodes = core.table.new(#app.instance, 0) + up_apps[app.name] = nodes + end + core.table.insert(nodes, { + host = ip, + port = port, + weight = metadata and metadata.weight or default_weight, + metadata = metadata, + }) + if metadata then + -- remove useless data + metadata.weight = nil + end + end + end + end + applications = up_apps +end + + +function _M.nodes(service_name) + if not applications then + log.error("failed to fetch nodes for : ", service_name) + return + end + + return applications[service_name] +end + + +function _M.init_worker() + if not local_conf.eureka or not local_conf.eureka.host or #local_conf.eureka.host == 0 then + error("do not set eureka.host") + return + end + + local ok, err = core.schema.check(schema, local_conf.eureka) + if not ok then + error("invalid eureka configuration: " .. 
err) + return + end + default_weight = local_conf.eureka.weight or 100 + log.info("default_weight:", default_weight, ".") + local fetch_interval = local_conf.eureka.fetch_interval or 30 + log.info("fetch_interval:", fetch_interval, ".") + ngx_timer_at(0, fetch_full_registry) + ngx_timer_every(fetch_interval, fetch_full_registry) +end + + +return _M diff --git a/apisix/discovery/init.lua b/apisix/discovery/init.lua new file mode 100644 index 0000000000000..16aafe62c50d5 --- /dev/null +++ b/apisix/discovery/init.lua @@ -0,0 +1,33 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +local log = require("apisix.core.log") +local local_conf = require("apisix.core.config_local").local_conf() + +local discovery_type = local_conf.apisix and local_conf.apisix.discovery +local discovery + +if discovery_type then + log.info("use discovery: ", discovery_type) + discovery = require("apisix.discovery." .. 
discovery_type) +end + + +return { + version = 0.1, + discovery = discovery +} diff --git a/apisix/http/router/radixtree_sni.lua b/apisix/http/router/radixtree_sni.lua index 0ecc3bf3cbb2a..1eb7aa54508b4 100644 --- a/apisix/http/router/radixtree_sni.lua +++ b/apisix/http/router/radixtree_sni.lua @@ -18,9 +18,14 @@ local get_request = require("resty.core.base").get_request local radixtree_new = require("resty.radixtree").new local core = require("apisix.core") local ngx_ssl = require("ngx.ssl") -local ipairs = ipairs +local config_util = require("apisix.core.config_util") +local ipairs = ipairs local type = type local error = error +local str_find = string.find +local aes = require "resty.aes" +local assert = assert +local ngx_decode_base64 = ngx.decode_base64 local ssl_certificates local radixtree_router local radixtree_router_ver @@ -38,9 +43,44 @@ local function create_router(ssl_items) local route_items = core.table.new(#ssl_items, 0) local idx = 0 - for _, ssl in ipairs(ssl_items) do - if type(ssl) == "table" then - local sni = ssl.value.sni:reverse() + local local_conf = core.config.local_conf() + local iv + if local_conf and local_conf.apisix + and local_conf.apisix.ssl + and local_conf.apisix.ssl.key_encrypt_salt then + iv = local_conf.apisix.ssl.key_encrypt_salt + end + local aes_128_cbc_with_iv = (type(iv)=="string" and #iv == 16) and + assert(aes:new(iv, nil, aes.cipher(128, "cbc"), {iv=iv})) or nil + + for _, ssl in config_util.iterate_values(ssl_items) do + if ssl.value ~= nil and + (ssl.value.status == nil or ssl.value.status == 1) then -- compatible with old version + + local j = 0 + local sni + if type(ssl.value.snis) == "table" and #ssl.value.snis > 0 then + sni = core.table.new(0, #ssl.value.snis) + for _, s in ipairs(ssl.value.snis) do + j = j + 1 + sni[j] = s:reverse() + end + else + sni = ssl.value.sni:reverse() + end + + -- decrypt private key + if aes_128_cbc_with_iv ~= nil and + not str_find(ssl.value.key, "---") then + local decrypted = 
aes_128_cbc_with_iv:decrypt(ngx_decode_base64(ssl.value.key)) + if decrypted == nil then + core.log.error("decrypt ssl key failed. key[", ssl.value.key, "] ") + else + ssl.value.key = decrypted + end + end + + local idx = idx + 1 route_items[idx] = { paths = sni, @@ -49,6 +89,7 @@ local function create_router(ssl_items) return end api_ctx.matched_ssl = ssl + api_ctx.matched_sni = sni end } end @@ -114,14 +155,37 @@ function _M.match_and_set(api_ctx) end core.log.debug("sni: ", sni) - local ok = radixtree_router:dispatch(sni:reverse(), nil, api_ctx) + + local sni_rev = sni:reverse() + local ok = radixtree_router:dispatch(sni_rev, nil, api_ctx) if not ok then core.log.warn("not found any valid sni configuration") return false end + + if type(api_ctx.matched_sni) == "table" then + local matched = false + for _, msni in ipairs(api_ctx.matched_sni) do + if sni_rev == msni or not str_find(sni_rev, ".", #msni, true) then + matched = true + end + end + if not matched then + core.log.warn("not found any valid sni configuration, matched sni: ", + core.json.delay_encode(api_ctx.matched_sni, true), " current sni: ", sni) + return false + end + else + if str_find(sni_rev, ".", #api_ctx.matched_sni, true) then + core.log.warn("not found any valid sni configuration, matched sni: ", + api_ctx.matched_sni:reverse(), " current sni: ", sni) + return false + end + end + local matched_ssl = api_ctx.matched_ssl - core.log.info("debug: ", core.json.delay_encode(matched_ssl, true)) + core.log.info("debug - matched: ", core.json.delay_encode(matched_ssl, true)) ok, err = set_pem_ssl_key(matched_ssl.value.cert, matched_ssl.value.key) if not ok then return false, err @@ -135,7 +199,7 @@ function _M.init_worker() local err ssl_certificates, err = core.config.new("/ssl", { automatic = true, - item_schema = core.schema.ssl + item_schema = core.schema.ssl, }) if not ssl_certificates then error("failed to create etcd instance for fetching ssl certificates: " diff --git a/apisix/http/service.lua 
b/apisix/http/service.lua index 42d31dd58b3c0..161d82fe23588 100644 --- a/apisix/http/service.lua +++ b/apisix/http/service.lua @@ -14,7 +14,8 @@ -- See the License for the specific language governing permissions and -- limitations under the License. -- -local core = require("apisix.core") +local core = require("apisix.core") +local ipairs = ipairs local services local error = error local pairs = pairs @@ -45,17 +46,36 @@ local function filter(service) return end - if not service.value.upstream then + if not service.value.upstream or not service.value.upstream.nodes then return end - for addr, _ in pairs(service.value.upstream.nodes or {}) do - local host = core.utils.parse_addr(addr) - if not core.utils.parse_ipv4(host) and - not core.utils.parse_ipv6(host) then - service.has_domain = true - break + local nodes = service.value.upstream.nodes + if core.table.isarray(nodes) then + for _, node in ipairs(nodes) do + local host = node.host + if not core.utils.parse_ipv4(host) and + not core.utils.parse_ipv6(host) then + service.has_domain = true + break + end end + else + local new_nodes = core.table.new(core.table.nkeys(nodes), 0) + for addr, weight in pairs(nodes) do + local host, port = core.utils.parse_addr(addr) + if not core.utils.parse_ipv4(host) and + not core.utils.parse_ipv6(host) then + service.has_domain = true + end + local node = { + host = host, + port = port, + weight = weight, + } + core.table.insert(new_nodes, node) + end + service.value.upstream.nodes = new_nodes end core.log.info("filter service: ", core.json.delay_encode(service)) diff --git a/apisix/init.lua b/apisix/init.lua index 48c5b8098bdf9..41295f095dd6c 100644 --- a/apisix/init.lua +++ b/apisix/init.lua @@ -22,13 +22,14 @@ local service_fetch = require("apisix.http.service").get local admin_init = require("apisix.admin.init") local get_var = require("resty.ngxvar").fetch local router = require("apisix.router") +local set_upstream = require("apisix.upstream").set_by_route local ipmatcher = 
require("resty.ipmatcher") local ngx = ngx local get_method = ngx.req.get_method local ngx_exit = ngx.exit local math = math local error = error -local pairs = pairs +local ipairs = ipairs local tostring = tostring local load_balancer @@ -42,7 +43,7 @@ local function parse_args(args) end -local _M = {version = 0.3} +local _M = {version = 0.4} function _M.http_init(args) @@ -74,7 +75,10 @@ function _M.http_init_worker() if not ok then error("failed to init worker event: " .. err) end - + local discovery = require("apisix.discovery.init").discovery + if discovery and discovery.init_worker then + discovery.init_worker() + end require("apisix.balancer").init_worker() load_balancer = require("apisix.balancer").run require("apisix.admin.init").init_worker() @@ -89,6 +93,7 @@ function _M.http_init_worker() end require("apisix.debug").init_worker() + require("apisix.upstream").init_worker() local local_conf = core.config.local_conf() local dns_resolver_valid = local_conf and local_conf.apisix and @@ -107,30 +112,7 @@ local function run_plugin(phase, plugins, api_ctx) end plugins = plugins or api_ctx.plugins - if not plugins then - return api_ctx - end - - if phase == "balancer" then - local balancer_name = api_ctx.balancer_name - local balancer_plugin = api_ctx.balancer_plugin - if balancer_name and balancer_plugin then - local phase_fun = balancer_plugin[phase] - phase_fun(balancer_plugin, api_ctx) - return api_ctx - end - - for i = 1, #plugins, 2 do - local phase_fun = plugins[i][phase] - if phase_fun and - (not balancer_name or balancer_name == plugins[i].name) then - phase_fun(plugins[i + 1], api_ctx) - if api_ctx.balancer_name == plugins[i].name then - api_ctx.balancer_plugin = plugins[i] - return api_ctx - end - end - end + if not plugins or #plugins == 0 then return api_ctx end @@ -179,71 +161,71 @@ function _M.http_ssl_phase() end -local function parse_domain_in_up(up, ver) - local new_nodes = core.table.new(0, 8) - for addr, weight in pairs(up.value.nodes) do - 
local host, port = core.utils.parse_addr(addr) +local function parse_domain(host) + local ip_info, err = core.utils.dns_parse(dns_resolver, host) + if not ip_info then + core.log.error("failed to parse domain for ", host, ", error:",err) + return nil, err + end + + core.log.info("parse addr: ", core.json.delay_encode(ip_info)) + core.log.info("resolver: ", core.json.delay_encode(dns_resolver)) + core.log.info("host: ", host) + if ip_info.address then + core.log.info("dns resolver domain: ", host, " to ", ip_info.address) + return ip_info.address + else + return nil, "failed to parse domain" + end +end + + +local function parse_domain_for_nodes(nodes) + local new_nodes = core.table.new(#nodes, 0) + for _, node in ipairs(nodes) do + local host = node.host if not ipmatcher.parse_ipv4(host) and - not ipmatcher.parse_ipv6(host) then - local ip_info, err = core.utils.dns_parse(dns_resolver, host) - if not ip_info then - return nil, err + not ipmatcher.parse_ipv6(host) then + local ip, err = parse_domain(host) + if ip then + local new_node = core.table.clone(node) + new_node.host = ip + core.table.insert(new_nodes, new_node) end - core.log.info("parse addr: ", core.json.delay_encode(ip_info)) - core.log.info("resolver: ", core.json.delay_encode(dns_resolver)) - core.log.info("host: ", host) - if ip_info.address then - new_nodes[ip_info.address .. ":" .. 
port] = weight - core.log.info("dns resolver domain: ", host, " to ", - ip_info.address) - else - return nil, "failed to parse domain in route" + if err then + return nil, err end else - new_nodes[addr] = weight + core.table.insert(new_nodes, node) end end + return new_nodes +end + +local function parse_domain_in_up(up, ver) + local nodes = up.value.nodes + local new_nodes, err = parse_domain_for_nodes(nodes) + if not new_nodes then + return nil, err + end up.dns_value = core.table.clone(up.value) up.dns_value.nodes = new_nodes - core.log.info("parse upstream which contain domain: ", - core.json.delay_encode(up)) + core.log.info("parse upstream which contain domain: ", core.json.delay_encode(up)) return up end local function parse_domain_in_route(route, ver) - local new_nodes = core.table.new(0, 8) - for addr, weight in pairs(route.value.upstream.nodes) do - local host, port = core.utils.parse_addr(addr) - if not ipmatcher.parse_ipv4(host) and - not ipmatcher.parse_ipv6(host) then - local ip_info, err = core.utils.dns_parse(dns_resolver, host) - if not ip_info then - return nil, err - end - - core.log.info("parse addr: ", core.json.delay_encode(ip_info)) - core.log.info("resolver: ", core.json.delay_encode(dns_resolver)) - core.log.info("host: ", host) - if ip_info and ip_info.address then - new_nodes[ip_info.address .. ":" .. 
port] = weight - core.log.info("dns resolver domain: ", host, " to ", - ip_info.address) - else - return nil, "failed to parse domain in route" - end - - else - new_nodes[addr] = weight - end + local nodes = route.value.upstream.nodes + local new_nodes, err = parse_domain_for_nodes(nodes) + if not new_nodes then + return nil, err end - route.dns_value = core.table.deepcopy(route.value) route.dns_value.upstream.nodes = new_nodes - core.log.info("parse route which contain domain: ", - core.json.delay_encode(route)) + core.log.info("parse route which contain domain: ", core.json.delay_encode(route)) return route end @@ -280,6 +262,8 @@ function _M.http_access_phase() api_ctx.conf_type = nil api_ctx.conf_version = nil api_ctx.conf_id = nil + + api_ctx.global_rules = router.global_rules end router.router_http.match(api_ctx) @@ -380,6 +364,12 @@ function _M.http_access_phase() end end run_plugin("access", plugins, api_ctx) + + local ok, err = set_upstream(route, api_ctx) + if not ok then + core.log.error("failed to parse upstream: ", err) + core.response.exit(500) + end end @@ -440,38 +430,43 @@ function _M.grpc_access_phase() run_plugin("rewrite", plugins, api_ctx) run_plugin("access", plugins, api_ctx) + + set_upstream(route, api_ctx) end -local function common_phase(plugin_name) + +local function common_phase(phase_name) local api_ctx = ngx.ctx.api_ctx if not api_ctx then return end - if router.global_rules and router.global_rules.values - and #router.global_rules.values > 0 - then + if api_ctx.global_rules then local plugins = core.tablepool.fetch("plugins", 32, 0) - local values = router.global_rules.values + local values = api_ctx.global_rules.values for _, global_rule in config_util.iterate_values(values) do core.table.clear(plugins) plugins = plugin.filter(global_rule, plugins) - run_plugin(plugin_name, plugins, api_ctx) + run_plugin(phase_name, plugins, api_ctx) end core.tablepool.release("plugins", plugins) end - run_plugin(plugin_name, nil, api_ctx) + + 
run_plugin(phase_name, nil, api_ctx) return api_ctx end + function _M.http_header_filter_phase() common_phase("header_filter") end + function _M.http_body_filter_phase() common_phase("body_filter") end + function _M.http_log_phase() local api_ctx = common_phase("log") @@ -488,6 +483,7 @@ function _M.http_log_phase() core.tablepool.release("api_ctx", api_ctx) end + function _M.http_balancer_phase() local api_ctx = ngx.ctx.api_ctx if not api_ctx then @@ -495,22 +491,10 @@ function _M.http_balancer_phase() return core.response.exit(500) end - -- first time - if not api_ctx.balancer_name then - run_plugin("balancer", nil, api_ctx) - if api_ctx.balancer_name then - return - end - end - - if api_ctx.balancer_name and api_ctx.balancer_name ~= "default" then - return run_plugin("balancer", nil, api_ctx) - end - - api_ctx.balancer_name = "default" load_balancer(api_ctx.matched_route, api_ctx) end + local function cors_admin() local local_conf = core.config.local_conf() if local_conf.apisix and not local_conf.apisix.enable_admin_cors then @@ -536,6 +520,10 @@ local function cors_admin() "Access-Control-Max-Age", "3600") end +local function add_content_type() + core.response.set_header("Content-Type", "application/json") +end + do local router @@ -547,6 +535,9 @@ function _M.http_admin() -- add cors rsp header cors_admin() + -- add content type to rsp header + add_content_type() + -- core.log.info("uri: ", get_var("uri"), " method: ", get_method()) local ok = router:dispatch(get_var("uri"), {method = get_method()}) if not ok then @@ -606,7 +597,13 @@ function _M.stream_preread_phase() api_ctx.plugins = plugin.stream_filter(matched_route, plugins) -- core.log.info("valid plugins: ", core.json.delay_encode(plugins, true)) + api_ctx.conf_type = "stream/route" + api_ctx.conf_version = matched_route.modifiedIndex + api_ctx.conf_id = matched_route.value.id + run_plugin("preread", plugins, api_ctx) + + set_upstream(matched_route, api_ctx) end @@ -618,19 +615,6 @@ function 
_M.stream_balancer_phase() return ngx_exit(1) end - -- first time - if not api_ctx.balancer_name then - run_plugin("balancer", nil, api_ctx) - if api_ctx.balancer_name then - return - end - end - - if api_ctx.balancer_name and api_ctx.balancer_name ~= "default" then - return run_plugin("balancer", nil, api_ctx) - end - - api_ctx.balancer_name = "default" load_balancer(api_ctx.matched_route, api_ctx) end diff --git a/apisix/plugin.lua b/apisix/plugin.lua index 8186d155af615..075d0584358be 100644 --- a/apisix/plugin.lua +++ b/apisix/plugin.lua @@ -235,7 +235,8 @@ end function _M.filter(user_route, plugins) plugins = plugins or core.table.new(#local_plugins * 2, 0) local user_plugin_conf = user_route.value.plugins - if user_plugin_conf == nil then + if user_plugin_conf == nil or + core.table.nkeys(user_plugin_conf) == 0 then if local_conf and local_conf.apisix.enable_debug then core.response.set_header("Apisix-Plugins", "no plugin") end diff --git a/apisix/plugins/authz-keycloak.lua b/apisix/plugins/authz-keycloak.lua new file mode 100644 index 0000000000000..2704f4ef03563 --- /dev/null +++ b/apisix/plugins/authz-keycloak.lua @@ -0,0 +1,165 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local core = require("apisix.core") +local http = require "resty.http" +local sub_str = string.sub +local url = require "net.url" +local tostring = tostring +local ngx = ngx +local plugin_name = "authz-keycloak" + + +local schema = { + type = "object", + properties = { + token_endpoint = {type = "string", minLength = 1, maxLength = 4096}, + permissions = { + type = "array", + items = { + type = "string", + minLength = 1, maxLength = 100 + }, + uniqueItems = true + }, + grant_type = { + type = "string", + default="urn:ietf:params:oauth:grant-type:uma-ticket", + enum = {"urn:ietf:params:oauth:grant-type:uma-ticket"}, + minLength = 1, maxLength = 100 + }, + audience = {type = "string", minLength = 1, maxLength = 100}, + timeout = {type = "integer", minimum = 1000, default = 3000}, + policy_enforcement_mode = { + type = "string", + enum = {"ENFORCING", "PERMISSIVE"}, + default = "ENFORCING" + }, + keepalive = {type = "boolean", default = true}, + keepalive_timeout = {type = "integer", minimum = 1000, default = 60000}, + keepalive_pool = {type = "integer", minimum = 1, default = 5}, + + }, + required = {"token_endpoint"} +} + + +local _M = { + version = 0.1, + priority = 2000, + type = 'auth', + name = plugin_name, + schema = schema, +} + +function _M.check_schema(conf) + return core.schema.check(schema, conf) +end + +local function is_path_protected(conf) + -- TODO if permissions are empty lazy load paths from Keycloak + if conf.permissions == nil then + return false + end + return true +end + + +local function evaluate_permissions(conf, token) + local url_decoded = url.parse(conf.token_endpoint) + local host = url_decoded.host + local port = url_decoded.port + + if not port then + if url_decoded.scheme == "https" then + port = 443 + else + port = 80 + end + end + + if not is_path_protected(conf) and conf.policy_enforcement_mode == "ENFORCING" then + core.response.exit(403) + return + end + + local httpc = http.new() + httpc:set_timeout(conf.timeout) + + local 
params = { + method = "POST", + body = ngx.encode_args({ + grant_type = conf.grant_type, + audience = conf.audience, + response_mode = "decision", + permission = conf.permissions + }), + headers = { + ["Content-Type"] = "application/x-www-form-urlencoded", + ["Authorization"] = token + } + } + + if conf.keepalive then + params.keepalive_timeout = conf.keepalive_timeout + params.keepalive_pool = conf.keepalive_pool + else + params.keepalive = conf.keepalive + end + + local httpc_res, httpc_err = httpc:request_uri(conf.token_endpoint, params) + + if not httpc_res then + core.log.error("error while sending authz request to [", host ,"] port[", + tostring(port), "] ", httpc_err) + core.response.exit(500, httpc_err) + return + end + + if httpc_res.status >= 400 then + core.log.error("status code: ", httpc_res.status, " msg: ", httpc_res.body) + core.response.exit(httpc_res.status, httpc_res.body) + end +end + + +local function fetch_jwt_token(ctx) + local token = core.request.header(ctx, "authorization") + if not token then + return nil, "authorization header not available" + end + + local prefix = sub_str(token, 1, 7) + if prefix ~= 'Bearer ' and prefix ~= 'bearer ' then + return "Bearer " .. token + end + return token +end + + +function _M.rewrite(conf, ctx) + core.log.debug("hit keycloak-auth rewrite") + local jwt_token, err = fetch_jwt_token(ctx) + if not jwt_token then + core.log.error("failed to fetch JWT token: ", err) + return 401, {message = "Missing JWT token in request"} + end + + evaluate_permissions(conf, jwt_token) +end + + +return _M diff --git a/apisix/plugins/batch-requests.lua b/apisix/plugins/batch-requests.lua index 34d784a89f970..71878218d8d7e 100644 --- a/apisix/plugins/batch-requests.lua +++ b/apisix/plugins/batch-requests.lua @@ -14,12 +14,15 @@ -- See the License for the specific language governing permissions and -- limitations under the License. 
-- -local core = require("apisix.core") -local http = require("resty.http") -local ngx = ngx -local io_open = io.open -local ipairs = ipairs -local pairs = pairs +local core = require("apisix.core") +local http = require("resty.http") +local ngx = ngx +local io_open = io.open +local ipairs = ipairs +local pairs = pairs +local str_find = string.find +local str_lower = string.lower + local plugin_name = "batch-requests" @@ -112,18 +115,42 @@ local function check_input(data) end end +local function lowercase_key_or_init(obj) + if not obj then + return {} + end -local function set_common_header(data) - if not data.headers then - return + local lowercase_key_obj = {} + for k, v in pairs(obj) do + lowercase_key_obj[str_lower(k)] = v end + return lowercase_key_obj +end + +local function ensure_header_lowercase(data) + data.headers = lowercase_key_or_init(data.headers) + for i,req in ipairs(data.pipeline) do - if not req.headers then - req.headers = data.headers - else - for k, v in pairs(data.headers) do - if not req.headers[k] then + req.headers = lowercase_key_or_init(req.headers) + end +end + + +local function set_common_header(data) + local outer_headers = core.request.headers(nil) + for i,req in ipairs(data.pipeline) do + for k, v in pairs(data.headers) do + if not req.headers[k] then + req.headers[k] = v + end + end + + if outer_headers then + for k, v in pairs(outer_headers) do + local is_content_header = str_find(k, "content-", 1, true) == 1 + -- skip header start with "content-" + if not req.headers[k] and not is_content_header then req.headers[k] = v end end @@ -198,8 +225,10 @@ local function batch_requests() core.response.exit(500, {error_msg = "connect to apisix failed: " .. err}) end + ensure_header_lowercase(data) set_common_header(data) set_common_query(data) + local responses, err = httpc:request_pipeline(data.pipeline) if not responses then core.response.exit(400, {error_msg = "request failed: " .. 
err}) diff --git a/apisix/plugins/consumer-restriction.lua b/apisix/plugins/consumer-restriction.lua new file mode 100644 index 0000000000000..912e2129a8cc2 --- /dev/null +++ b/apisix/plugins/consumer-restriction.lua @@ -0,0 +1,94 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local ipairs = ipairs +local core = require("apisix.core") + +local schema = { + type = "object", + properties = { + whitelist = { + type = "array", + items = {type = "string"}, + minItems = 1 + }, + blacklist = { + type = "array", + items = {type = "string"}, + minItems = 1 + } + }, + oneOf = { + {required = {"whitelist"}}, + {required = {"blacklist"}} + } +} + + +local plugin_name = "consumer-restriction" + + +local _M = { + version = 0.1, + priority = 2400, + name = plugin_name, + schema = schema, +} + +local function is_include(value, tab) + for k,v in ipairs(tab) do + if v == value then + return true + end + end + return false +end + +function _M.check_schema(conf) + local ok, err = core.schema.check(schema, conf) + + if not ok then + return false, err + end + + return true +end + +function _M.access(conf, ctx) + if not ctx.consumer then + return 401, { message = "Missing authentication or identity verification." 
} + end + + local block = false + if conf.blacklist and #conf.blacklist > 0 then + if is_include(ctx.consumer.username, conf.blacklist) then + block = true + end + end + + if conf.whitelist and #conf.whitelist > 0 then + if not is_include(ctx.consumer.username, conf.whitelist) then + block = true + end + end + + if block then + return 403, { message = "The consumer is not allowed" } + end +end + + +return _M diff --git a/apisix/plugins/cors.lua b/apisix/plugins/cors.lua index b64010e299771..9fe91c94dd582 100644 --- a/apisix/plugins/cors.lua +++ b/apisix/plugins/cors.lua @@ -71,11 +71,34 @@ local schema = { local _M = { version = 0.1, priority = 4000, - type = 'auth', name = plugin_name, schema = schema, } +local function create_mutiple_origin_cache(conf) + if not str_find(conf.allow_origins, ",", 1, true) then + return nil + end + local origin_cache = {} + local iterator, err = re_gmatch(conf.allow_origins, "([^,]+)", "jiox") + if not iterator then + core.log.error("match origins failed: ", err) + return nil + end + while true do + local origin, err = iterator() + if err then + core.log.error("iterate origins failed: ", err) + return nil + end + if not origin then + break + end + origin_cache[origin[0]] = true + end + return origin_cache +end + function _M.check_schema(conf) local ok, err = core.schema.check(schema, conf) if not ok then @@ -85,63 +108,48 @@ function _M.check_schema(conf) return true end -function _M.access(conf, ctx) +local function set_cors_headers(conf, ctx) + local allow_methods = conf.allow_methods + if allow_methods == "**" then + allow_methods = "GET,POST,PUT,DELETE,PATCH,HEAD,OPTIONS,CONNECT,TRACE" + end + + ngx.header["Access-Control-Allow-Origin"] = ctx.cors_allow_origins + ngx.header["Access-Control-Allow-Methods"] = allow_methods + ngx.header["Access-Control-Allow-Headers"] = conf.allow_headers + ngx.header["Access-Control-Max-Age"] = conf.max_age + if conf.allow_credential then + ngx.header["Access-Control-Allow-Credentials"] = true + 
end + ngx.header["Access-Control-Expose-Headers"] = conf.expose_headers +end + +function _M.rewrite(conf, ctx) local allow_origins = conf.allow_origins + local req_origin = core.request.header(ctx, "Origin") if allow_origins == "**" then - allow_origins = ngx.var.http_origin or '*' + allow_origins = req_origin or '*' + end + local multiple_origin, err = core.lrucache.plugin_ctx(plugin_name, ctx, + create_mutiple_origin_cache, conf) + if err then + return 500, {message = "get mutiple origin cache failed: " .. err} end - if str_find(allow_origins, ",", 1, true) then - local finded = false - local iterator, err = re_gmatch(allow_origins, "([^,]+)", "jiox") - if not iterator then - return 500, {message = "match origins failed", error = err} - end - while true do - local origin, err = iterator() - if err then - return 500, {message = "iterate origins failed", error = err} - end - if not origin then - break - end - if origin[0] == ngx.var.http_origin then - allow_origins = origin[0] - finded = true - break - end - end - if not finded then + if multiple_origin then + if multiple_origin[req_origin] then + allow_origins = req_origin + else return end end ctx.cors_allow_origins = allow_origins + set_cors_headers(conf, ctx) if ctx.var.request_method == "OPTIONS" then return 200 end end -function _M.header_filter(conf, ctx) - if not ctx.cors_allow_origins then - -- no origin matched, don't add headers - return - end - - local allow_methods = conf.allow_methods - if allow_methods == "**" then - allow_methods = "GET,POST,PUT,DELETE,PATCH,HEAD,OPTIONS,CONNECT,TRACE" - end - - ngx.header["Access-Control-Allow-Origin"] = ctx.cors_allow_origins - ngx.header["Access-Control-Allow-Methods"] = allow_methods - ngx.header["Access-Control-Allow-Headers"] = conf.allow_headers - ngx.header["Access-Control-Expose-Headers"] = conf.expose_headers - ngx.header["Access-Control-Max-Age"] = conf.max_age - if conf.allow_credential then - ngx.header["Access-Control-Allow-Credentials"] = true - end 
-end - return _M diff --git a/apisix/plugins/echo.lua b/apisix/plugins/echo.lua new file mode 100644 index 0000000000000..76ab4e57e5c62 --- /dev/null +++ b/apisix/plugins/echo.lua @@ -0,0 +1,119 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local core = require("apisix.core") +local pairs = pairs +local type = type +local ngx = ngx + + +local schema = { + type = "object", + properties = { + before_body = { + description = "body before the filter phase.", + type = "string" + }, + body = { + description = "body to replace upstream response.", + type = "string" + }, + after_body = { + description = "body after the modification of filter phase.", + type = "string" + }, + headers = { + description = "new headers for repsonse", + type = "object", + minProperties = 1, + }, + auth_value = { + description = "auth value", + type = "string" + } + }, + anyOf = { + {required = {"before_body"}}, + {required = {"body"}}, + {required = {"after_body"}} + }, + minProperties = 1 +} + +local plugin_name = "echo" + +local _M = { + version = 0.1, + priority = 412, + name = plugin_name, + schema = schema, +} + +function _M.check_schema(conf) + if conf.headers then + conf.headers_arr = {} + + for field, value in pairs(conf.headers) do + if type(field) == 'string' + and (type(value) == 'string' or type(value) == 'number') then + if #field == 0 then + return false, 'invalid field length in header' + end + core.table.insert(conf.headers_arr, field) + core.table.insert(conf.headers_arr, value) + else + return false, 'invalid type as header value' + end + end + end + + return core.schema.check(schema, conf) +end + +function _M.body_filter(conf, ctx) + if conf.body then + ngx.arg[1] = conf.body + end + + if conf.before_body then + ngx.arg[1] = conf.before_body .. ngx.arg[1] + end + + if conf.after_body then + ngx.arg[1] = ngx.arg[1] .. 
conf.after_body + end + ngx.arg[2] = true +end + +function _M.access(conf, ctx) + local value = core.request.header(ctx, "Authorization") + + if value ~= conf.auth_value then + return 401, "unauthorized body" + end + +end + +function _M.header_filter(conf, ctx) + if conf.headers_arr then + local field_cnt = #conf.headers_arr + for i = 1, field_cnt, 2 do + ngx.header[conf.headers_arr[i]] = conf.headers_arr[i+1] + end + end +end + +return _M diff --git a/apisix/plugins/example-plugin.lua b/apisix/plugins/example-plugin.lua index 025ade4fd8e06..bf36837985110 100644 --- a/apisix/plugins/example-plugin.lua +++ b/apisix/plugins/example-plugin.lua @@ -15,7 +15,7 @@ -- limitations under the License. -- local core = require("apisix.core") -local balancer = require("ngx.balancer") +local upstream = require("apisix.upstream") local schema = { type = "object", @@ -60,25 +60,27 @@ end function _M.access(conf, ctx) core.log.warn("plugin access phase, conf: ", core.json.encode(conf)) -- return 200, {message = "hit example plugin"} -end - - -function _M.balancer(conf, ctx) - core.log.warn("plugin balancer phase, conf: ", core.json.encode(conf)) if not conf.ip then return end - -- NOTE: update `ctx.balancer_name` is important, APISIX will skip other - -- balancer handler. - ctx.balancer_name = plugin_name + local up_conf = { + type = "roundrobin", + nodes = { + {host = conf.ip, port = conf.port, weight = 1} + } + } - local ok, err = balancer.set_current_peer(conf.ip, conf.port) + local ok, err = upstream.check_schema(up_conf) if not ok then - core.log.error("failed to set server peer: ", err) - return core.response.exit(502) + return 500, err end + + local matched_route = ctx.matched_route + upstream.set(ctx, up_conf.type .. "#route_" .. 
matched_route.value.id, + ctx.conf_version, up_conf, matched_route) + return end diff --git a/apisix/plugins/grpc-transcode/util.lua b/apisix/plugins/grpc-transcode/util.lua index 83d89abaf2a01..d705a1ed7126e 100644 --- a/apisix/plugins/grpc-transcode/util.lua +++ b/apisix/plugins/grpc-transcode/util.lua @@ -51,7 +51,7 @@ local function get_from_request(name, kind) local request_table if ngx.req.get_method() == "POST" then if string.find(ngx.req.get_headers()["Content-Type"] or "", - "application/json", true) then + "application/json", 1, true) then request_table = json.decode(ngx.req.get_body_data()) else request_table = ngx.req.get_post_args() diff --git a/apisix/plugins/heartbeat.lua b/apisix/plugins/heartbeat.lua index 0a6cf76cbdc53..ed4fa2c208a21 100644 --- a/apisix/plugins/heartbeat.lua +++ b/apisix/plugins/heartbeat.lua @@ -114,7 +114,7 @@ local function report() local res res, err = request_apisix_svr(args) if not res then - core.log.error("failed to report heartbeat information: ", err) + core.log.info("failed to report heartbeat information: ", err) return end diff --git a/apisix/plugins/http-logger.lua b/apisix/plugins/http-logger.lua new file mode 100644 index 0000000000000..44df6aeff99cb --- /dev/null +++ b/apisix/plugins/http-logger.lua @@ -0,0 +1,176 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local core = require("apisix.core") +local log_util = require("apisix.utils.log-util") +local batch_processor = require("apisix.utils.batch-processor") +local plugin_name = "http-logger" +local ngx = ngx +local tostring = tostring +local http = require "resty.http" +local url = require "net.url" +local buffers = {} + +local schema = { + type = "object", + properties = { + uri = {type = "string"}, + auth_header = {type = "string", default = ""}, + timeout = {type = "integer", minimum = 1, default = 3}, + name = {type = "string", default = "http logger"}, + max_retry_count = {type = "integer", minimum = 0, default = 0}, + retry_delay = {type = "integer", minimum = 0, default = 1}, + buffer_duration = {type = "integer", minimum = 1, default = 60}, + inactive_timeout = {type = "integer", minimum = 1, default = 5}, + batch_max_size = {type = "integer", minimum = 1, default = 1000}, + include_req_body = {type = "boolean", default = false} + }, + required = {"uri"} +} + + +local _M = { + version = 0.1, + priority = 410, + name = plugin_name, + schema = schema, +} + + +function _M.check_schema(conf) + return core.schema.check(schema, conf) +end + + +local function send_http_data(conf, log_message) + local err_msg + local res = true + local url_decoded = url.parse(conf.uri) + local host = url_decoded.host + local port = url_decoded.port + + if ((not port) and url_decoded.scheme == "https") then + port = 443 + elseif not port then + port = 80 + end + + local httpc = http.new() + httpc:set_timeout(conf.timeout * 1000) + local ok, err = httpc:connect(host, port) + + if not ok then + return false, "failed to connect to host[" .. host .. "] port[" + .. tostring(port) .. "] " .. err + end + + if url_decoded.scheme == "https" then + ok, err = httpc:ssl_handshake(true, host, false) + if not ok then + return nil, "failed to perform SSL with host[" .. host .. "] " + .. 
"port[" .. tostring(port) .. "] " .. err + end + end + + local httpc_res, httpc_err = httpc:request({ + method = "POST", + path = url_decoded.path, + query = url_decoded.query, + body = log_message, + headers = { + ["Host"] = url_decoded.host, + ["Content-Type"] = "application/json", + ["Authorization"] = conf.auth_header + } + }) + + if not httpc_res then + return false, "error while sending data to [" .. host .. "] port[" + .. tostring(port) .. "] " .. httpc_err + end + + -- some error occurred in the server + if httpc_res.status >= 400 then + res = false + err_msg = "server returned status code[" .. httpc_res.status .. "] host[" + .. host .. "] port[" .. tostring(port) .. "] " + .. "body[" .. httpc_res:read_body() .. "]" + end + + -- keep the connection alive + ok, err = httpc:set_keepalive(conf.keepalive) + + if not ok then + core.log.debug("failed to keep the connection alive", err) + end + + return res, err_msg +end + + +function _M.log(conf) + local entry = log_util.get_full_log(ngx, conf) + + if not entry.route_id then + core.log.error("failed to obtain the route id for http logger") + return + end + + local log_buffer = buffers[entry.route_id] + + if log_buffer then + log_buffer:push(entry) + return + end + + -- Generate a function to be executed by the batch processor + local func = function(entries, batch_max_size) + local data, err + if batch_max_size == 1 then + data, err = core.json.encode(entries[1]) -- encode as single {} + else + data, err = core.json.encode(entries) -- encode as array [{}] + end + + if not data then + return false, 'error occurred while encoding the data: ' .. 
err + end + + return send_http_data(conf, data) + end + + local config = { + name = conf.name, + retry_delay = conf.retry_delay, + batch_max_size = conf.batch_max_size, + max_retry_count = conf.max_retry_count, + buffer_duration = conf.buffer_duration, + inactive_timeout = conf.inactive_timeout, + } + + local err + log_buffer, err = batch_processor:new(func, config) + + if not log_buffer then + core.log.error("error when creating the batch processor: ", err) + return + end + + buffers[entry.route_id] = log_buffer + log_buffer:push(entry) +end + +return _M diff --git a/apisix/plugins/ip-restriction.lua b/apisix/plugins/ip-restriction.lua index ab4deed3a0d86..f08c9c7ccbd01 100644 --- a/apisix/plugins/ip-restriction.lua +++ b/apisix/plugins/ip-restriction.lua @@ -110,7 +110,7 @@ function _M.check_schema(conf) end -local function create_ip_mather(ip_list) +local function create_ip_matcher(ip_list) local ip, err = ipmatcher.new(ip_list) if not ip then core.log.error("failed to create ip matcher: ", err, @@ -128,7 +128,7 @@ function _M.access(conf, ctx) if conf.blacklist and #conf.blacklist > 0 then local matcher = lrucache(conf.blacklist, nil, - create_ip_mather, conf.blacklist) + create_ip_matcher, conf.blacklist) if matcher then block = matcher:match(remote_addr) end @@ -136,7 +136,7 @@ function _M.access(conf, ctx) if conf.whitelist and #conf.whitelist > 0 then local matcher = lrucache(conf.whitelist, nil, - create_ip_mather, conf.whitelist) + create_ip_matcher, conf.whitelist) if matcher then block = not matcher:match(remote_addr) end diff --git a/apisix/plugins/kafka-logger.lua b/apisix/plugins/kafka-logger.lua index a9050b9d6080f..fc7d90cde719f 100644 --- a/apisix/plugins/kafka-logger.lua +++ b/apisix/plugins/kafka-logger.lua @@ -21,7 +21,11 @@ local batch_processor = require("apisix.utils.batch-processor") local pairs = pairs local type = type local table = table +local ipairs = ipairs local plugin_name = "kafka-logger" +local stale_timer_running = false; +local 
timer_at = ngx.timer.at +local tostring = tostring local ngx = ngx local buffers = {} @@ -40,6 +44,7 @@ local schema = { buffer_duration = {type = "integer", minimum = 1, default = 60}, inactive_timeout = {type = "integer", minimum = 1, default = 5}, batch_max_size = {type = "integer", minimum = 1, default = 1000}, + include_req_body = {type = "boolean", default = false} }, required = {"broker_list", "kafka_topic", "key"} } @@ -89,9 +94,25 @@ local function send_kafka_data(conf, log_message) end end +-- remove stale objects from the memory after timer expires +local function remove_stale_objects(premature) + if premature then + return + end + + for key, batch in ipairs(buffers) do + if #batch.entry_buffer.entries == 0 and #batch.batch_to_process == 0 then + core.log.debug("removing batch processor stale object, route id:", tostring(key)) + buffers[key] = nil + end + end + + stale_timer_running = false +end + function _M.log(conf) - local entry = log_util.get_full_log(ngx) + local entry = log_util.get_full_log(ngx, conf) if not entry.route_id then core.log.error("failed to obtain the route id for kafka logger") @@ -100,6 +121,12 @@ function _M.log(conf) local log_buffer = buffers[entry.route_id] + if not stale_timer_running then + -- run the timer every 30 mins if any log is present + timer_at(1800, remove_stale_objects) + stale_timer_running = true + end + if log_buffer then log_buffer:push(entry) return diff --git a/apisix/plugins/limit-conn.lua b/apisix/plugins/limit-conn.lua index dbffbabb8277c..6ca46d5d1df7f 100644 --- a/apisix/plugins/limit-conn.lua +++ b/apisix/plugins/limit-conn.lua @@ -30,9 +30,9 @@ local schema = { enum = {"remote_addr", "server_addr", "http_x_real_ip", "http_x_forwarded_for"}, }, - rejected_code = {type = "integer", minimum = 200}, + rejected_code = {type = "integer", minimum = 200, default = 503}, }, - required = {"conn", "burst", "default_conn_delay", "key", "rejected_code"} + required = {"conn", "burst", "default_conn_delay", "key"} } 
diff --git a/apisix/plugins/limit-count.lua b/apisix/plugins/limit-count.lua index 42db2d54784b4..3e9d4af28adee 100644 --- a/apisix/plugins/limit-count.lua +++ b/apisix/plugins/limit-count.lua @@ -34,7 +34,8 @@ local schema = { enum = {"remote_addr", "server_addr", "http_x_real_ip", "http_x_forwarded_for"}, }, - rejected_code = {type = "integer", minimum = 200, maximum = 600}, + rejected_code = {type = "integer", minimum = 200, maximum = 600, + default = 503}, policy = { type = "string", enum = {"local", "redis"}, @@ -53,7 +54,7 @@ local schema = { }, }, additionalProperties = false, - required = {"count", "time_window", "key", "rejected_code"}, + required = {"count", "time_window", "key"}, } diff --git a/apisix/plugins/limit-req.lua b/apisix/plugins/limit-req.lua index e35c4b328e514..1caadce8b2f11 100644 --- a/apisix/plugins/limit-req.lua +++ b/apisix/plugins/limit-req.lua @@ -29,9 +29,9 @@ local schema = { enum = {"remote_addr", "server_addr", "http_x_real_ip", "http_x_forwarded_for"}, }, - rejected_code = {type = "integer", minimum = 200}, + rejected_code = {type = "integer", minimum = 200, default = 503}, }, - required = {"rate", "burst", "key", "rejected_code"} + required = {"rate", "burst", "key"} } diff --git a/apisix/plugins/openid-connect.lua b/apisix/plugins/openid-connect.lua index 6a93226f9baac..2572c856ca955 100644 --- a/apisix/plugins/openid-connect.lua +++ b/apisix/plugins/openid-connect.lua @@ -116,11 +116,12 @@ local function introspect(ctx, conf) end else res, err = openidc.introspect(conf) - if res then + if err then + return ngx.HTTP_UNAUTHORIZED, err + else return res end end - if conf.bearer_only then ngx.header["WWW-Authenticate"] = 'Bearer realm="' .. conf.realm .. '",error="' .. err .. 
'"' diff --git a/apisix/plugins/prometheus/exporter.lua b/apisix/plugins/prometheus/exporter.lua index 538deaba3a496..2c0b6fc618abf 100644 --- a/apisix/plugins/prometheus/exporter.lua +++ b/apisix/plugins/prometheus/exporter.lua @@ -14,26 +14,48 @@ -- See the License for the specific language governing permissions and -- limitations under the License. -- -local base_prometheus = require("resty.prometheus") +local base_prometheus = require("prometheus") local core = require("apisix.core") local ipairs = ipairs local ngx = ngx local ngx_capture = ngx.location.capture local re_gmatch = ngx.re.gmatch +local tonumber = tonumber +local select = select local prometheus -- Default set of latency buckets, 1ms to 60s: local DEFAULT_BUCKETS = { 1, 2, 5, 7, 10, 15, 20, 25, 30, 40, 50, 60, 70, 80, 90, 100, 200, 300, 400, 500, 1000, - 2000, 5000, 10000, 30000, 60000 } + 2000, 5000, 10000, 30000, 60000 +} local metrics = {} -local _M = {version = 0.3} + local inner_tab_arr = {} + local clear_tab = core.table.clear +local function gen_arr(...) + clear_tab(inner_tab_arr) + + for i = 1, select('#', ...) do + inner_tab_arr[i] = select(i, ...) 
+ end + + return inner_tab_arr +end + + +local _M = {} function _M.init() + -- todo: support hot reload, we may need to update the lua-prometheus + -- library + if ngx.get_phase() ~= "init" and ngx.get_phase() ~= "init_worker" then + return + end + core.table.clear(metrics) -- across all services @@ -54,6 +76,10 @@ function _M.init() "HTTP request latency per service in APISIX", {"type", "service", "node"}, DEFAULT_BUCKETS) + metrics.overhead = prometheus:histogram("http_overhead", + "HTTP request overhead per service in APISIX", + {"type", "service", "node"}, DEFAULT_BUCKETS) + metrics.bandwidth = prometheus:counter("bandwidth", "Total bandwidth in bytes consumed per service in APISIX", {"type", "route", "service", "node"}) @@ -75,21 +101,31 @@ function _M.log(conf, ctx) service_id = vars.host end - metrics.status:inc(1, vars.status, route_id, service_id, balancer_ip) + metrics.status:inc(1, + gen_arr(vars.status, route_id, service_id, balancer_ip)) local latency = (ngx.now() - ngx.req.start_time()) * 1000 - metrics.latency:observe(latency, "request", service_id, balancer_ip) + metrics.latency:observe(latency, + gen_arr("request", service_id, balancer_ip)) + + local overhead = latency + if ctx.var.upstream_response_time then + overhead = overhead - tonumber(ctx.var.upstream_response_time) * 1000 + end + metrics.overhead:observe(overhead, + gen_arr("request", service_id, balancer_ip)) - metrics.bandwidth:inc(vars.request_length, "ingress", route_id, service_id, - balancer_ip) + metrics.bandwidth:inc(vars.request_length, + gen_arr("ingress", route_id, service_id, balancer_ip)) - metrics.bandwidth:inc(vars.bytes_sent, "egress", route_id, service_id, - balancer_ip) + metrics.bandwidth:inc(vars.bytes_sent, + gen_arr("egress", route_id, service_id, balancer_ip)) end local ngx_statu_items = {"active", "accepted", "handled", "total", "reading", "writing", "waiting"} + local label_values = {} local function nginx_status() local res = ngx_capture("/apisix/nginx_status") if 
not res or res.status ~= 200 then @@ -114,7 +150,8 @@ local function nginx_status() break end - metrics.connections:set(val[0], name) + label_values[1] = name + metrics.connections:set(val[0], label_values) end end diff --git a/apisix/plugins/redirect.lua b/apisix/plugins/redirect.lua index 6cc28ac31307d..a9df21f26b5a0 100644 --- a/apisix/plugins/redirect.lua +++ b/apisix/plugins/redirect.lua @@ -30,8 +30,12 @@ local schema = { properties = { ret_code = {type = "integer", minimum = 200, default = 302}, uri = {type = "string", minLength = 2}, + http_to_https = {type = "boolean"}, -- default is false }, - required = {"uri"}, + oneOf = { + {required = {"uri"}}, + {required = {"http_to_https"}} + } } @@ -80,11 +84,13 @@ function _M.check_schema(conf) return false, err end - local uri_segs, err = parse_uri(conf.uri) - if not uri_segs then - return false, err + if conf.uri then + local uri_segs, err = parse_uri(conf.uri) + if not uri_segs then + return false, err + end + core.log.info(core.json.delay_encode(uri_segs)) end - core.log.info(core.json.delay_encode(uri_segs)) return true end @@ -120,15 +126,22 @@ end function _M.rewrite(conf, ctx) core.log.info("plugin rewrite phase, conf: ", core.json.delay_encode(conf)) - local new_uri, err = concat_new_uri(conf.uri, ctx) - if not new_uri then - core.log.error("failed to generate new uri by: ", conf.uri, " error: ", - err) - core.response.exit(500) + if conf.http_to_https and ctx.var.scheme == "http" then + conf.uri = "https://$host$request_uri" + conf.ret_code = 301 end - core.response.set_header("Location", new_uri) - core.response.exit(conf.ret_code) + if conf.uri and conf.ret_code then + local new_uri, err = concat_new_uri(conf.uri, ctx) + if not new_uri then + core.log.error("failed to generate new uri by: ", conf.uri, " error: ", + err) + core.response.exit(500) + end + + core.response.set_header("Location", new_uri) + core.response.exit(conf.ret_code) + end end diff --git a/apisix/plugins/skywalking.lua 
b/apisix/plugins/skywalking.lua new file mode 100644 index 0000000000000..f95286bd8d143 --- /dev/null +++ b/apisix/plugins/skywalking.lua @@ -0,0 +1,80 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local core = require("apisix.core") +local ngx = ngx +local math = math + +local sw_client = require("apisix.plugins.skywalking.client") +local sw_tracer = require("apisix.plugins.skywalking.tracer") + +local plugin_name = "skywalking" + + +local schema = { + type = "object", + properties = { + endpoint = {type = "string"}, + sample_ratio = {type = "number", minimum = 0.00001, maximum = 1, default = 1} + }, + service_name = { + type = "string", + description = "service name for skywalking", + default = "APISIX", + }, + required = {"endpoint"} +} + + +local _M = { + version = 0.1, + priority = -1100, -- last running plugin, but before serverless post func + name = plugin_name, + schema = schema, +} + + +function _M.check_schema(conf) + return core.schema.check(schema, conf) +end + + +function _M.rewrite(conf, ctx) + core.log.debug("rewrite phase of skywalking plugin") + ctx.skywalking_sample = false + if conf.sample_ratio == 1 or math.random() < conf.sample_ratio then + ctx.skywalking_sample = true + 
sw_client.heartbeat(conf) + -- Currently, we can not have the upstream real network address + sw_tracer.start(ctx, conf.endpoint, "upstream service") + end +end + + +function _M.body_filter(conf, ctx) + if ctx.skywalking_sample and ngx.arg[2] then + sw_tracer.finish(ctx) + end +end + + +function _M.log(conf, ctx) + if ctx.skywalking_sample then + sw_tracer.prepareForReport(ctx, conf.endpoint) + end +end + +return _M diff --git a/apisix/plugins/skywalking/client.lua b/apisix/plugins/skywalking/client.lua new file mode 100644 index 0000000000000..f83a6e35bf803 --- /dev/null +++ b/apisix/plugins/skywalking/client.lua @@ -0,0 +1,232 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local core = require("apisix.core") +local http = require("resty.http") +local cjson = require('cjson') +local ngx = ngx +local ipairs = ipairs + +local register = require("skywalking.register") + +local _M = {} + +local function register_service(conf) + local endpoint = conf.endpoint + + local tracing_buffer = ngx.shared['skywalking-tracing-buffer'] + local service_id = tracing_buffer:get(endpoint .. 
'_service_id') + if service_id then + return service_id + end + + local service_name = conf.service_name + local service = register.newServiceRegister(service_name) + + local httpc = http.new() + local res, err = httpc:request_uri(endpoint .. '/v2/service/register', + { + method = "POST", + body = core.json.encode(service), + headers = { + ["Content-Type"] = "application/json", + }, + }) + if not res then + core.log.error("skywalking service register failed, request uri: ", + endpoint .. '/v2/service/register', ", err: ", err) + + elseif res.status == 200 then + core.log.debug("skywalking service register response: ", res.body) + local register_results = cjson.decode(res.body) + + for _, result in ipairs(register_results) do + if result.key == service_name then + service_id = result.value + core.log.debug("skywalking service registered, service id:" + .. service_id) + end + end + + else + core.log.error("skywalking service register failed, request uri:", + endpoint .. "/v2/service/register", + ", response code:", res.status) + end + + if service_id then + tracing_buffer:set(endpoint .. '_service_id', service_id) + end + + return service_id +end + +local function register_service_instance(conf, service_id) + local endpoint = conf.endpoint + + local tracing_buffer = ngx.shared['skywalking-tracing-buffer'] + local instance_id = tracing_buffer:get(endpoint .. '_instance_id') + if instance_id then + return instance_id + end + + local service_instance_name = core.id.get() + local service_instance = register.newServiceInstanceRegister( + service_id, + service_instance_name, + ngx.now() * 1000) + + local httpc = http.new() + local res, err = httpc:request_uri(endpoint .. '/v2/instance/register', + { + method = "POST", + body = core.json.encode(service_instance), + headers = { + ["Content-Type"] = "application/json", + }, + }) + + if not res then + core.log.error("skywalking service Instance register failed", + ", request uri: ", conf.endpoint .. 
'/v2/instance/register', + ", err: ", err) + + elseif res.status == 200 then + core.log.debug("skywalking service instance register response: ", res.body) + local register_results = cjson.decode(res.body) + + for _, result in ipairs(register_results) do + if result.key == service_instance_name then + instance_id = result.value + end + end + + else + core.log.error("skywalking service instance register failed, ", + "response code:", res.status) + end + + if instance_id then + tracing_buffer:set(endpoint .. '_instance_id', instance_id) + end + + return instance_id +end + +local function ping(endpoint) + local tracing_buffer = ngx.shared['skywalking-tracing-buffer'] + local ping_pkg = register.newServiceInstancePingPkg( + tracing_buffer:get(endpoint .. '_instance_id'), + core.id.get(), + ngx.now() * 1000) + + local httpc = http.new() + local res, err = httpc:request_uri(endpoint .. '/v2/instance/heartbeat', { + method = "POST", + body = core.json.encode(ping_pkg), + headers = { + ["Content-Type"] = "application/json", + }, + }) + + if err then + core.log.error("skywalking agent ping failed, err: ", err) + else + core.log.debug(res.body) + end +end + +-- report trace segments to the backend +local function report_traces(endpoint) + local tracing_buffer = ngx.shared['skywalking-tracing-buffer'] + local segment = tracing_buffer:rpop(endpoint .. '_segment') + + local count = 0 + + local httpc = http.new() + + while segment ~= nil do + local res, err = httpc:request_uri(endpoint .. 
'/v2/segments', { + method = "POST", + body = segment, + headers = { + ["Content-Type"] = "application/json", + }, + }) + + if err == nil then + if res.status ~= 200 then + core.log.error("skywalking segment report failed, response code ", res.status) + break + else + count = count + 1 + core.log.debug(res.body) + end + else + core.log.error("skywalking segment report failed, err: ", err) + break + end + + segment = tracing_buffer:rpop('segment') + end + + if count > 0 then + core.log.debug(count, " skywalking segments reported") + end +end + +do + local heartbeat_timer + +function _M.heartbeat(conf) + local sw_heartbeat = function() + local service_id = register_service(conf) + if not service_id then + return + end + + core.log.debug("skywalking service registered, ", + "service id: ", service_id) + + local service_instance_id = register_service_instance(conf, service_id) + if not service_instance_id then + return + end + + core.log.debug("skywalking service Instance registered, ", + "service instance id: ", service_instance_id) + report_traces(conf.endpoint) + ping(conf.endpoint) + end + + local err + if ngx.worker.id() == 0 and not heartbeat_timer then + heartbeat_timer, err = core.timer.new("skywalking_heartbeat", + sw_heartbeat, + {check_interval = 3} + ) + if not heartbeat_timer then + core.log.error("failed to create skywalking_heartbeat timer: ", err) + else + core.log.info("succeed to create timer: skywalking heartbeat") + end + end +end + +end -- do + + +return _M diff --git a/apisix/plugins/skywalking/tracer.lua b/apisix/plugins/skywalking/tracer.lua new file mode 100644 index 0000000000000..187b941edf46d --- /dev/null +++ b/apisix/plugins/skywalking/tracer.lua @@ -0,0 +1,101 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. 
+-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local core = require("apisix.core") +local span = require("skywalking.span") +local tracing_context = require("skywalking.tracing_context") +local span_layer = require("skywalking.span_layer") +local sw_segment = require('skywalking.segment') + +local pairs = pairs +local ngx = ngx + +-- Constant pre-defined in SkyWalking main repo +-- 6000 represents Nginx +local NGINX_COMPONENT_ID = 6000 + +local _M = {} + +function _M.start(ctx, endpoint, upstream_name) + local context + -- TODO: use lrucache for better performance + local tracing_buffer = ngx.shared['skywalking-tracing-buffer'] + local instance_id = tracing_buffer:get(endpoint .. '_instance_id') + local service_id = tracing_buffer:get(endpoint .. '_service_id') + + if service_id and instance_id then + context = tracing_context.new(service_id, instance_id) + else + context = tracing_context.newNoOP() + end + + local context_carrier = {} + context_carrier["sw6"] = ngx.req.get_headers()["sw6"] + local entry_span = tracing_context.createEntrySpan(context, ctx.var.uri, nil, context_carrier) + span.start(entry_span, ngx.now() * 1000) + span.setComponentId(entry_span, NGINX_COMPONENT_ID) + span.setLayer(entry_span, span_layer.HTTP) + + span.tag(entry_span, 'http.method', ngx.req.get_method()) + span.tag(entry_span, 'http.params', ctx.var.scheme .. '://' + .. ctx.var.host .. 
ctx.var.request_uri) + + context_carrier = {} + local exit_span = tracing_context.createExitSpan(context, + ctx.var.upstream_uri, + entry_span, + upstream_name, + context_carrier) + span.start(exit_span, ngx.now() * 1000) + span.setComponentId(exit_span, NGINX_COMPONENT_ID) + span.setLayer(exit_span, span_layer.HTTP) + + for name, value in pairs(context_carrier) do + ngx.req.set_header(name, value) + end + + -- Push the data in the context + ctx.sw_tracing_context = context + ctx.sw_entry_span = entry_span + ctx.sw_exit_span = exit_span + + core.log.debug("push data into skywalking context") +end + +function _M.finish(ctx) + -- Finish the exit span when received the first response package from upstream + if ctx.sw_exit_span then + span.finish(ctx.sw_exit_span, ngx.now() * 1000) + ctx.sw_exit_span = nil + end +end + +function _M.prepareForReport(ctx, endpoint) + if ctx.sw_entry_span then + span.finish(ctx.sw_entry_span, ngx.now() * 1000) + local status, segment = tracing_context.drainAfterFinished(ctx.sw_tracing_context) + if status then + local segment_json = core.json.encode(sw_segment.transform(segment)) + core.log.debug('segment = ', segment_json) + + local tracing_buffer = ngx.shared['skywalking-tracing-buffer'] + local length = tracing_buffer:lpush(endpoint .. '_segment', segment_json) + core.log.debug('segment buffer size = ', length) + end + end +end + +return _M diff --git a/apisix/plugins/syslog.lua b/apisix/plugins/syslog.lua new file mode 100644 index 0000000000000..7b96a2e010b67 --- /dev/null +++ b/apisix/plugins/syslog.lua @@ -0,0 +1,189 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local core = require("apisix.core") +local log_util = require("apisix.utils.log-util") +local batch_processor = require("apisix.utils.batch-processor") +local logger_socket = require("resty.logger.socket") +local plugin_name = "syslog" +local ngx = ngx +local buffers = {} +local ipairs = ipairs +local stale_timer_running = false; +local timer_at = ngx.timer.at +local tostring = tostring + +local schema = { + type = "object", + properties = { + host = {type = "string"}, + port = {type = "integer"}, + name = {type = "string", default = "sys logger"}, + flush_limit = {type = "integer", minimum = 1, default = 4096}, + drop_limit = {type = "integer", default = 1048576}, + timeout = {type = "integer", minimum = 1, default = 3}, + sock_type = {type = "string", default = "tcp"}, + max_retry_times = {type = "integer", minimum = 1, default = 1}, + retry_interval = {type = "integer", minimum = 0, default = 1}, + pool_size = {type = "integer", minimum = 5, default = 5}, + tls = {type = "boolean", default = false}, + batch_max_size = {type = "integer", minimum = 1, default = 1000}, + buffer_duration = {type = "integer", minimum = 1, default = 60}, + include_req_body = {type = "boolean", default = false} + }, + required = {"host", "port"} +} + +local lrucache = core.lrucache.new({ + ttl = 300, count = 512 +}) + +local _M = { + version = 0.1, + priority = 401, + name = plugin_name, + schema = schema, +} + +function _M.check_schema(conf) + return core.schema.check(schema, conf) +end + +function _M.flush_syslog(logger) + local ok, err = logger:flush(logger) + if 
not ok then + core.log.error("failed to flush message:", err) + end +end + +local function send_syslog_data(conf, log_message) + local err_msg + local res = true + + -- fetch api_ctx + local api_ctx = ngx.ctx.api_ctx + if not api_ctx then + core.log.error("invalid api_ctx cannot proceed with sys logger plugin") + return core.response.exit(500) + end + + -- fetch it from lrucache + local logger, err = lrucache(api_ctx.conf_type .. "#" .. api_ctx.conf_id, api_ctx.conf_version, + logger_socket.new, logger_socket, { + host = conf.host, + port = conf.port, + flush_limit = conf.flush_limit, + drop_limit = conf.drop_limit, + timeout = conf.timeout, + sock_type = conf.sock_type, + max_retry_times = conf.max_retry_times, + retry_interval = conf.retry_interval, + pool_size = conf.pool_size, + tls = conf.tls, + }) + + if not logger then + res = false + err_msg = "failed when initiating the sys logger processor".. err + end + + -- reuse the logger object + local ok, err = logger:log(core.json.encode(log_message)) + if not ok then + res = false + err_msg = "failed to log message" .. 
err + end + + return res, err_msg +end + +-- remove stale objects from the memory after timer expires +local function remove_stale_objects(premature) + if premature then + return + end + + for key, batch in ipairs(buffers) do + if #batch.entry_buffer.entries == 0 and #batch.batch_to_process == 0 then + core.log.debug("removing batch processor stale object, route id:", tostring(key)) + buffers[key] = nil + end + end + + stale_timer_running = false +end + +-- log phase in APISIX +function _M.log(conf) + local entry = log_util.get_full_log(ngx, conf) + + if not entry.route_id then + core.log.error("failed to obtain the route id for sys logger") + return + end + + local log_buffer = buffers[entry.route_id] + + if not stale_timer_running then + -- run the timer every 30 mins if any log is present + timer_at(1800, remove_stale_objects) + stale_timer_running = true + end + + if log_buffer then + log_buffer:push(entry) + return + end + + -- Generate a function to be executed by the batch processor + local func = function(entries, batch_max_size) + local data, err + if batch_max_size == 1 then + data, err = core.json.encode(entries[1]) -- encode as single {} + else + data, err = core.json.encode(entries) -- encode as array [{}] + end + + if not data then + return false, 'error occurred while encoding the data: ' .. 
err + end + + return send_syslog_data(conf, data) + end + + local config = { + name = conf.name, + retry_delay = conf.retry_interval, + batch_max_size = conf.batch_max_size, + max_retry_count = conf.max_retry_times, + buffer_duration = conf.buffer_duration, + inactive_timeout = conf.timeout, + } + + local err + log_buffer, err = batch_processor:new(func, config) + + if not log_buffer then + core.log.error("error when creating the batch processor: ", err) + return + end + + buffers[entry.route_id] = log_buffer + log_buffer:push(entry) + +end + +return _M diff --git a/apisix/plugins/tcp-logger.lua b/apisix/plugins/tcp-logger.lua index 9eeef3320b778..ced5f8f23dad3 100644 --- a/apisix/plugins/tcp-logger.lua +++ b/apisix/plugins/tcp-logger.lua @@ -22,6 +22,9 @@ local tostring = tostring local buffers = {} local ngx = ngx local tcp = ngx.socket.tcp +local ipairs = ipairs +local stale_timer_running = false; +local timer_at = ngx.timer.at local schema = { type = "object", @@ -37,6 +40,7 @@ local schema = { buffer_duration = {type = "integer", minimum = 1, default = 60}, inactive_timeout = {type = "integer", minimum = 1, default = 5}, batch_max_size = {type = "integer", minimum = 1, default = 1000}, + include_req_body = {type = "boolean", default = false} }, required = {"host", "port"} } @@ -94,9 +98,25 @@ local function send_tcp_data(conf, log_message) return res, err_msg end +-- remove stale objects from the memory after timer expires +local function remove_stale_objects(premature) + if premature then + return + end + + for key, batch in ipairs(buffers) do + if #batch.entry_buffer.entries == 0 and #batch.batch_to_process == 0 then + core.log.debug("removing batch processor stale object, route id:", tostring(key)) + buffers[key] = nil + end + end + + stale_timer_running = false +end + function _M.log(conf) - local entry = log_util.get_full_log(ngx) + local entry = log_util.get_full_log(ngx, conf) if not entry.route_id then core.log.error("failed to obtain the route id for 
tcp logger") @@ -105,6 +125,12 @@ function _M.log(conf) local log_buffer = buffers[entry.route_id] + if not stale_timer_running then + -- run the timer every 30 mins if any log is present + timer_at(1800, remove_stale_objects) + stale_timer_running = true + end + if log_buffer then log_buffer:push(entry) return diff --git a/apisix/plugins/udp-logger.lua b/apisix/plugins/udp-logger.lua index b1b565fb1b2d0..cec782a347624 100644 --- a/apisix/plugins/udp-logger.lua +++ b/apisix/plugins/udp-logger.lua @@ -22,6 +22,9 @@ local tostring = tostring local buffers = {} local ngx = ngx local udp = ngx.socket.udp +local ipairs = ipairs +local stale_timer_running = false; +local timer_at = ngx.timer.at local schema = { type = "object", @@ -33,6 +36,7 @@ local schema = { buffer_duration = {type = "integer", minimum = 1, default = 60}, inactive_timeout = {type = "integer", minimum = 1, default = 5}, batch_max_size = {type = "integer", minimum = 1, default = 1000}, + include_req_body = {type = "boolean", default = false} }, required = {"host", "port"} } @@ -77,9 +81,25 @@ local function send_udp_data(conf, log_message) return res, err_msg end +-- remove stale objects from the memory after timer expires +local function remove_stale_objects(premature) + if premature then + return + end + + for key, batch in ipairs(buffers) do + if #batch.entry_buffer.entries == 0 and #batch.batch_to_process == 0 then + core.log.debug("removing batch processor stale object, route id:", tostring(key)) + buffers[key] = nil + end + end + + stale_timer_running = false +end + function _M.log(conf) - local entry = log_util.get_full_log(ngx) + local entry = log_util.get_full_log(ngx, conf) if not entry.route_id then core.log.error("failed to obtain the route id for udp logger") @@ -88,6 +108,12 @@ function _M.log(conf) local log_buffer = buffers[entry.route_id] + if not stale_timer_running then + -- run the timer every 30 mins if any log is present + timer_at(1800, remove_stale_objects) + stale_timer_running 
= true + end + if log_buffer then log_buffer:push(entry) return diff --git a/apisix/plugins/uri-blocker.lua b/apisix/plugins/uri-blocker.lua new file mode 100644 index 0000000000000..ab5b6828e7724 --- /dev/null +++ b/apisix/plugins/uri-blocker.lua @@ -0,0 +1,86 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local core = require("apisix.core") +local re_compile = require("resty.core.regex").re_match_compile +local re_find = ngx.re.find +local ipairs = ipairs + +local schema = { + type = "object", + properties = { + block_rules = { + type = "array", + items = { + type = "string", + minLength = 1, + maxLength = 4096, + }, + uniqueItems = true + }, + rejected_code = { + type = "integer", + minimum = 200, + default = 403 + }, + }, + required = {"block_rules"}, +} + + +local plugin_name = "uri-blocker" + +local _M = { + version = 0.1, + priority = 2900, + name = plugin_name, + schema = schema, +} + + +function _M.check_schema(conf) + local ok, err = core.schema.check(schema, conf) + if not ok then + return false, err + end + + local block_rules = {} + for i, re_rule in ipairs(conf.block_rules) do + local ok, err = re_compile(re_rule, "j") + -- core.log.warn("ok: ", tostring(ok), " err: ", tostring(err), " re_rule: ", re_rule) + if not ok then + return false, err + end + block_rules[i] = re_rule + end + + conf.block_rules_concat = core.table.concat(block_rules, "|") + core.log.info("concat block_rules: ", conf.block_rules_concat) + return true +end + + +function _M.rewrite(conf, ctx) + core.log.info("uri: ", ctx.var.request_uri) + core.log.info("block uri rules: ", conf.block_rules_concat) + local from = re_find(ctx.var.request_uri, conf.block_rules_concat, "jo") + if from then + core.response.exit(conf.rejected_code) + end +end + + +return _M diff --git a/apisix/plugins/zipkin.lua b/apisix/plugins/zipkin.lua index 56412390e3794..934d88398ac15 100644 --- a/apisix/plugins/zipkin.lua +++ b/apisix/plugins/zipkin.lua @@ -48,7 +48,7 @@ local schema = { local _M = { version = 0.1, - priority = -1000, -- last running plugin, but before serverless post func + priority = -1000, name = plugin_name, schema = schema, } diff --git a/apisix/router.lua b/apisix/router.lua index d3b45941d9950..4ba8709937171 100644 --- a/apisix/router.lua +++ b/apisix/router.lua @@ -15,12 +15,13 @@ -- 
limitations under the License. -- local require = require -local core = require("apisix.core") -local error = error -local pairs = pairs +local core = require("apisix.core") +local error = error +local pairs = pairs +local ipairs = ipairs -local _M = {version = 0.2} +local _M = {version = 0.3} local function filter(route) @@ -29,17 +30,36 @@ local function filter(route) return end - if not route.value.upstream then + if not route.value.upstream or not route.value.upstream.nodes then return end - for addr, _ in pairs(route.value.upstream.nodes or {}) do - local host = core.utils.parse_addr(addr) - if not core.utils.parse_ipv4(host) and - not core.utils.parse_ipv6(host) then - route.has_domain = true - break + local nodes = route.value.upstream.nodes + if core.table.isarray(nodes) then + for _, node in ipairs(nodes) do + local host = node.host + if not core.utils.parse_ipv4(host) and + not core.utils.parse_ipv6(host) then + route.has_domain = true + break + end end + else + local new_nodes = core.table.new(core.table.nkeys(nodes), 0) + for addr, weight in pairs(nodes) do + local host, port = core.utils.parse_addr(addr) + if not core.utils.parse_ipv4(host) and + not core.utils.parse_ipv6(host) then + route.has_domain = true + end + local node = { + host = host, + port = port, + weight = weight, + } + core.table.insert(new_nodes, node) + end + route.value.upstream.nodes = new_nodes end core.log.info("filter route: ", core.json.delay_encode(route)) @@ -78,7 +98,7 @@ end function _M.stream_init_worker() local router_stream = require("apisix.stream.router.ip_port") - router_stream.stream_init_worker() + router_stream.stream_init_worker(filter) _M.router_stream = router_stream end @@ -88,4 +108,8 @@ function _M.http_routes() end +-- for test +_M.filter_test = filter + + return _M diff --git a/apisix/schema_def.lua b/apisix/schema_def.lua index e261c98c1ec40..d580be6982ed3 100644 --- a/apisix/schema_def.lua +++ b/apisix/schema_def.lua @@ -18,7 +18,7 @@ local schema = 
require('apisix.core.schema') local setmetatable = setmetatable local error = error -local _M = {version = 0.4} +local _M = {version = 0.5} local plugins_schema = { @@ -225,11 +225,9 @@ local health_checker = { } -local upstream_schema = { - type = "object", - properties = { - nodes = { - description = "nodes of upstream", +local nodes_schema = { + anyOf = { + { type = "object", patternProperties = { [".*"] = { @@ -240,6 +238,39 @@ local upstream_schema = { }, minProperties = 1, }, + { + type = "array", + minItems = 1, + items = { + type = "object", + properties = { + host = host_def, + port = { + description = "port of node", + type = "integer", + minimum = 1, + }, + weight = { + description = "weight of node", + type = "integer", + minimum = 0, + }, + metadata = { + description = "metadata of node", + type = "object", + } + }, + required = {"host", "port", "weight"}, + }, + } + } +} + + +local upstream_schema = { + type = "object", + properties = { + nodes = nodes_schema, retries = { type = "integer", minimum = 1, @@ -296,12 +327,15 @@ local upstream_schema = { description = "enable websocket for request", type = "boolean" }, + name = {type = "string", maxLength = 50}, desc = {type = "string", maxLength = 256}, + service_name = {type = "string", maxLength = 50}, id = id_schema }, anyOf = { {required = {"type", "nodes"}}, {required = {"type", "k8s_deployment_info"}}, + {required = {"type", "service_name"}}, }, additionalProperties = false, } @@ -336,6 +370,7 @@ _M.route = { }, uniqueItems = true, }, + name = {type = "string", maxLength = 50}, desc = {type = "string", maxLength = 256}, priority = {type = "integer", default = 0}, @@ -413,6 +448,7 @@ _M.service = { plugins = plugins_schema, upstream = upstream_schema, upstream_id = id_schema, + name = {type = "string", maxLength = 50}, desc = {type = "string", maxLength = 256}, }, anyOf = { @@ -445,6 +481,7 @@ _M.upstream = upstream_schema _M.ssl = { type = "object", properties = { + id = id_schema, cert = { type = 
"string", minLength = 128, maxLength = 64*1024 }, @@ -454,13 +491,34 @@ _M.ssl = { sni = { type = "string", pattern = [[^\*?[0-9a-zA-Z-.]+$]], + }, + snis = { + type = "array", + items = { + type = "string", + pattern = [[^\*?[0-9a-zA-Z-.]+$]], + } + }, + exptime = { + type = "integer", + minimum = 1588262400, -- 2020/5/1 0:0:0 + }, + status = { + description = "ssl status, 1 to enable, 0 to disable", + type = "integer", + enum = {1, 0}, + default = 1 } }, - required = {"sni", "key", "cert"}, + oneOf = { + {required = {"sni", "key", "cert"}}, + {required = {"snis", "key", "cert"}} + }, additionalProperties = false, } + _M.proto = { type = "object", properties = { @@ -476,6 +534,7 @@ _M.proto = { _M.global_rule = { type = "object", properties = { + id = id_schema, plugins = plugins_schema }, required = {"plugins"}, @@ -486,6 +545,7 @@ _M.global_rule = { _M.stream_route = { type = "object", properties = { + id = id_schema, remote_addr = remote_addr_def, server_addr = { description = "server IP", diff --git a/apisix/stream/plugins/mqtt-proxy.lua b/apisix/stream/plugins/mqtt-proxy.lua index f8d3552c66c42..b5334306b1697 100644 --- a/apisix/stream/plugins/mqtt-proxy.lua +++ b/apisix/stream/plugins/mqtt-proxy.lua @@ -15,8 +15,8 @@ -- limitations under the License. 
-- local core = require("apisix.core") -local balancer = require("ngx.balancer") -local bit = require "bit" +local upstream = require("apisix.upstream") +local bit = require("bit") local ngx = ngx local ngx_exit = ngx.exit local str_byte = string.byte @@ -158,25 +158,28 @@ function _M.preread(conf, ctx) end core.log.info("mqtt client id: ", res.client_id) -end + local up_conf = { + type = "roundrobin", + nodes = { + {host = conf.upstream.ip, port = conf.upstream.port, weight = 1}, + } + } -function _M.log(conf, ctx) - core.log.info("plugin log phase, conf: ", core.json.encode(conf)) -end + local ok, err = upstream.check_schema(up_conf) + if not ok then + return 500, err + end + local matched_route = ctx.matched_route + upstream.set(ctx, up_conf.type .. "#route_" .. matched_route.value.id, + ctx.conf_version, up_conf, matched_route) + return +end -function _M.balancer(conf, ctx) - core.log.info("plugin balancer phase, conf: ", core.json.encode(conf)) - -- ctx.balancer_name = plugin_name - local up = conf.upstream - ctx.balancer_name = plugin_name - local ok, err = balancer.set_current_peer(up.ip, up.port) - if not ok then - core.log.error("failed to set server peer: ", err) - return ngx_exit(1) - end +function _M.log(conf, ctx) + core.log.info("plugin log phase, conf: ", core.json.encode(conf)) end diff --git a/apisix/upstream.lua b/apisix/upstream.lua new file mode 100644 index 0000000000000..203b713d01070 --- /dev/null +++ b/apisix/upstream.lua @@ -0,0 +1,154 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +local core = require("apisix.core") +local error = error +local tostring = tostring +local ipairs = ipairs +local pairs = pairs +local upstreams + + +local _M = {} + + +local function set_directly(ctx, key, ver, conf, parent) + if not ctx then + error("missing argument ctx", 2) + end + if not key then + error("missing argument key", 2) + end + if not ver then + error("missing argument ver", 2) + end + if not conf then + error("missing argument conf", 2) + end + if not parent then + error("missing argument parent", 2) + end + + ctx.upstream_conf = conf + ctx.upstream_version = ver + ctx.upstream_key = key + ctx.upstream_healthcheck_parent = parent + return +end +_M.set = set_directly + + +function _M.set_by_route(route, api_ctx) + if api_ctx.upstream_conf then + return true + end + + local up_id = route.value.upstream_id + if up_id then + if not upstreams then + return false, "need to create a etcd instance for fetching " + .. "upstream information" + end + + local up_obj = upstreams:get(tostring(up_id)) + if not up_obj then + return false, "failed to find upstream by id: " .. up_id + end + core.log.info("upstream: ", core.json.delay_encode(up_obj)) + + local up_conf = up_obj.dns_value or up_obj.value + set_directly(api_ctx, up_conf.type .. "#upstream_" .. 
up_id, + up_obj.modifiedIndex, up_conf, up_obj) + return true + end + + local up_conf = (route.dns_value and route.dns_value.upstream) + or route.value.upstream + if not up_conf then + return false, "missing upstream configuration in Route or Service" + end + + set_directly(api_ctx, up_conf.type .. "#route_" .. route.value.id, + api_ctx.conf_version, up_conf, route) + return true +end + + +function _M.upstreams() + if not upstreams then + return nil, nil + end + + return upstreams.values, upstreams.conf_version +end + + +function _M.check_schema(conf) + return core.schema.check(core.schema.upstream, conf) +end + + +function _M.init_worker() + local err + upstreams, err = core.config.new("/upstreams", { + automatic = true, + item_schema = core.schema.upstream, + filter = function(upstream) + upstream.has_domain = false + if not upstream.value or not upstream.value.nodes then + return + end + + local nodes = upstream.value.nodes + if core.table.isarray(nodes) then + for _, node in ipairs(nodes) do + local host = node.host + if not core.utils.parse_ipv4(host) and + not core.utils.parse_ipv6(host) then + upstream.has_domain = true + break + end + end + else + local new_nodes = core.table.new(core.table.nkeys(nodes), 0) + for addr, weight in pairs(nodes) do + local host, port = core.utils.parse_addr(addr) + if not core.utils.parse_ipv4(host) and + not core.utils.parse_ipv6(host) then + upstream.has_domain = true + end + local node = { + host = host, + port = port, + weight = weight, + } + core.table.insert(new_nodes, node) + end + upstream.value.nodes = new_nodes + end + + core.log.info("filter upstream: ", core.json.delay_encode(upstream)) + end, + }) + if not upstreams then + error("failed to create etcd instance for fetching upstream: " .. 
err) + return + end +end + + +return _M diff --git a/apisix/utils/log-util.lua b/apisix/utils/log-util.lua index 6ee03b288db6f..b11a435808f88 100644 --- a/apisix/utils/log-util.lua +++ b/apisix/utils/log-util.lua @@ -18,7 +18,7 @@ local core = require("apisix.core") local _M = {} -local function get_full_log(ngx) +local function get_full_log(ngx, conf) local ctx = ngx.ctx.api_ctx local var = ctx.var local service_id @@ -34,7 +34,7 @@ local function get_full_log(ngx) service_id = var.host end - return { + local log = { request = { url = url, uri = var.request_uri, @@ -56,6 +56,20 @@ local function get_full_log(ngx) start_time = ngx.req.start_time() * 1000, latency = (ngx.now() - ngx.req.start_time()) * 1000 } + + if conf.include_req_body then + local body = ngx.req.get_body_data() + if body then + log.request.body = body + else + local body_file = ngx.req.get_body_file() + if body_file then + log.request.body_file = body_file + end + end + end + + return log end _M.get_full_log = get_full_log diff --git a/benchmark/fake-apisix/conf/nginx.conf b/benchmark/fake-apisix/conf/nginx.conf index 327169adf189b..8666c29979772 100644 --- a/benchmark/fake-apisix/conf/nginx.conf +++ b/benchmark/fake-apisix/conf/nginx.conf @@ -24,7 +24,6 @@ pid logs/nginx.pid; worker_rlimit_nofile 20480; events { - accept_mutex off; worker_connections 10620; } @@ -33,6 +32,9 @@ worker_shutdown_timeout 3; http { lua_package_path "$prefix/lua/?.lua;;"; + log_format main '$remote_addr - $remote_user [$time_local] $http_host "$request" $status $body_bytes_sent $request_time "$http_referer" "$http_user_agent" $upstream_addr $upstream_status $upstream_response_time'; + access_log logs/access.log main buffer=16384 flush=5; + init_by_lua_block { require "resty.core" apisix = require("apisix") @@ -60,8 +62,6 @@ http { listen 9080; - access_log off; - server_tokens off; more_set_headers 'Server: APISIX web server'; @@ -106,6 +106,10 @@ http { apisix.http_header_filter_phase() } + body_filter_by_lua_block { 
+ apisix.http_body_filter_phase() + } + log_by_lua_block { apisix.http_log_phase() } diff --git a/benchmark/fake-apisix/lua/apisix.lua b/benchmark/fake-apisix/lua/apisix.lua index 30671f7fa41b6..ea5bf15bf1117 100644 --- a/benchmark/fake-apisix/lua/apisix.lua +++ b/benchmark/fake-apisix/lua/apisix.lua @@ -25,7 +25,7 @@ end local function fake_fetch() ngx.ctx.ip = "127.0.0.1" - ngx.ctx.port = 80 + ngx.ctx.port = 1980 end function _M.http_access_phase() @@ -42,6 +42,12 @@ function _M.http_header_filter_phase() end end +function _M.http_body_filter_phase() + if ngx.ctx then + -- do something + end +end + function _M.http_log_phase() if ngx.ctx then -- do something diff --git a/benchmark/run.sh b/benchmark/run.sh index ff068d64f57b1..c41b554d2ce5e 100755 --- a/benchmark/run.sh +++ b/benchmark/run.sh @@ -36,7 +36,12 @@ function onCtrlC () { sudo openresty -p $PWD/benchmark/server -s stop || exit 1 } -sed -i "s/worker_processes [0-9]*/worker_processes $worker_cnt/g" conf/nginx.conf +if [[ "$(uname)" == "Darwin" ]]; then + sed -i "" "s/worker_processes .*/worker_processes $worker_cnt;/g" conf/nginx.conf +else + sed -i "s/worker_processes .*/worker_processes $worker_cnt;/g" conf/nginx.conf +fi + make run sleep 3 diff --git a/bin/apisix b/bin/apisix index 1659de218ddce..4d7857ada7227 100755 --- a/bin/apisix +++ b/bin/apisix @@ -21,6 +21,8 @@ local function trim(s) return (s:gsub("^%s*(.-)%s*$", "%1")) end +-- Note: The `excute_cmd` return value will have a line break at the end, +-- it is recommended to use the `trim` function to handle the return value. 
local function excute_cmd(cmd) local t, err = io.popen(cmd) if not t then @@ -103,7 +105,6 @@ events { } worker_rlimit_core {* worker_rlimit_core *}; -working_directory /tmp/apisix_cores/; worker_shutdown_timeout 3; @@ -179,6 +180,7 @@ http { lua_shared_dict upstream-healthcheck 10m; lua_shared_dict worker-events 10m; lua_shared_dict lrucache-lock 10m; + lua_shared_dict skywalking-tracing-buffer 100m; # for openid-connect plugin lua_shared_dict discovery 1m; # cache for discovery metadata documents @@ -227,7 +229,7 @@ http { log_format main '$remote_addr - $remote_user [$time_local] $http_host "$request" $status $body_bytes_sent $request_time "$http_referer" "$http_user_agent" $upstream_addr $upstream_status $upstream_response_time'; - access_log {* http.access_log *} main buffer=32768 flush=3; + access_log {* http.access_log *} main buffer=16384 flush=3; open_file_cache max=1000 inactive=60; client_max_body_size 0; keepalive_timeout {* http.keepalive_timeout *}; @@ -239,6 +241,7 @@ http { more_set_headers 'Server: APISIX web server'; include mime.types; + charset utf-8; {% if real_ip_header then %} real_ip_header {* real_ip_header *}; @@ -284,7 +287,18 @@ http { {% if enable_admin and port_admin then %} server { + {%if https_admin then%} + listen {* port_admin *} ssl; + ssl_certificate cert/apisix_admin_ssl.crt; + ssl_certificate_key cert/apisix_admin_ssl.key; + ssl_session_cache shared:SSL:1m; + + ssl_protocols {* ssl.ssl_protocols *}; + ssl_ciphers {* ssl.ssl_ciphers *}; + ssl_prefer_server_ciphers on; + {% else %} listen {* port_admin *}; + {%end%} log_not_found off; location /apisix/admin { {%if allow_admin then%} @@ -309,7 +323,7 @@ http { alias dashboard/; - try_files $uri $uri/index.html /index.html; + try_files $uri $uri/index.html /index.html =404; } location /robots.txt { @@ -379,7 +393,7 @@ http { alias dashboard/; - try_files $uri $uri/index.html /index.html; + try_files $uri $uri/index.html /index.html =404; } {% end %} @@ -663,7 +677,7 @@ local 
function init() local sys_conf = { lua_path = pkg_path_org, lua_cpath = pkg_cpath_org, - os_name = excute_cmd("uname"), + os_name = trim(excute_cmd("uname")), apisix_lua_home = apisix_home, with_module_status = with_module_status, error_log = {level = "warn"}, @@ -699,6 +713,7 @@ local function init() if(sys_conf["enable_dev_mode"] == true) then sys_conf["worker_processes"] = 1 + sys_conf["enable_reuseport"] = false else sys_conf["worker_processes"] = "auto" end @@ -768,6 +783,18 @@ local function init_etcd(show_output) local host_count = #(yaml_conf.etcd.host) + -- check whether the user has enabled etcd v2 protocol + for index, host in ipairs(yaml_conf.etcd.host) do + uri = host .. "/v2/keys" + local cmd = "curl -i -m ".. timeout * 2 .. " -o /dev/null -s -w %{http_code} " .. uri + local res = excute_cmd(cmd) + if res == "404" then + io.stderr:write(string.format("failed: please make sure that you have enabled the v2 protocol of etcd on %s.\n", host)) + return + end + end + + local etcd_ok = false for index, host in ipairs(yaml_conf.etcd.host) do local is_success = true @@ -786,7 +813,7 @@ local function init_etcd(show_output) if not res:find("index", 1, true) and not res:find("createdIndex", 1, true) then is_success = false - if (index == hostCount) then + if (index == host_count) then error(cmd .. "\n" .. res) end break @@ -799,9 +826,14 @@ local function init_etcd(show_output) end if is_success then + etcd_ok = true break end end + + if not etcd_ok then + error("none of the configured etcd works well") + end end _M.init_etcd = init_etcd @@ -830,13 +862,17 @@ end function _M.reload() local test_cmd = openresty_args .. [[ -t -q ]] - if os.execute((test_cmd)) ~= 0 then + -- When success, + -- On linux, os.execute returns 0, + -- On macos, os.execute returns 3 values: true, exit, 0, and we need the first. + local test_ret = os.execute((test_cmd)) + if (test_ret == 0 or test_ret == true) then + local cmd = openresty_args .. 
[[ -s reload]] + -- print(cmd) + os.execute(cmd) return end - - local cmd = openresty_args .. [[ -s reload]] - -- print(cmd) - os.execute(cmd) + print("test openresty failed") end function _M.version() diff --git a/conf/cert/apisix_admin_ssl.crt b/conf/cert/apisix_admin_ssl.crt new file mode 100644 index 0000000000000..82d7fc3aa31a5 --- /dev/null +++ b/conf/cert/apisix_admin_ssl.crt @@ -0,0 +1,33 @@ +-----BEGIN CERTIFICATE----- +MIIFsTCCA5mgAwIBAgIUODyT8W4gAxf8uwMNmtj5M1ANoUwwDQYJKoZIhvcNAQEL +BQAwVjELMAkGA1UEBhMCQ04xEjAQBgNVBAgMCUd1YW5nRG9uZzEPMA0GA1UEBwwG +Wmh1SGFpMQ0wCwYDVQQKDARhcGk3MRMwEQYDVQQDDAphcGlzaXguZGV2MCAXDTIw +MDYwNDAzMzc1MFoYDzIxMjAwNTExMDMzNzUwWjBWMQswCQYDVQQGEwJDTjESMBAG +A1UECAwJR3VhbmdEb25nMQ8wDQYDVQQHDAZaaHVIYWkxDTALBgNVBAoMBGFwaTcx +EzARBgNVBAMMCmFwaXNpeC5kZXYwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQDQveSdplH49Lr+LsLWpGJbNRhf2En0V4SuFKpzGFP7mXaI7rMnpdH3BUVY +S3juMgPOdNh6ho4BeSbGZGfU3lG1NwIOXiPNA1mrTWGNGV97crJDVZeWTuDpqNHJ +4ATrnF6RnRbg0en8rjVtce6LBMrDJVyGbi9VAqBUPrCmzT/l0V1jPL6KNSN8mQog +ladrJuzUanfhWM9K9xyM+/SUt1MNUYFLNsVHasPzsi5/YDRBiwuzTtiT56O6yge2 +lvrdPFvULrCxlGteyvhtrFJwqjN//YtnQFooNR0CXBfXs0a7WGgMjawupuP1JKiY +t9KEcGHWGZDeLfsGGKgQ9G+PaP4y+gHjLr5xQvwt68otpoafGy+BpOoHZZFoLBpx +TtJKA3qnwyZg9zr7lrtqr8CISO/SEyh6xkAOUzb7yc2nHu9UpruzVIR7xI7pjc7f +2T6WyCVy6gFYQwzFLwkN/3O+ZJkioxXsnwaYWDj61k3d9ozVDkVkTuxmNJjXV8Ta +htGRAHo0/uHmpFTcaQfDf5o+iWi4z9B5kgfA/A1XWFQlCH1kl3mHKg7JNCN9qGF8 +rG+YzdiLQfo5OqJSvzGHRXbdGI2JQe/zyJHsMO7d0AhwXuPOWGTTAODOPlaBCxNB +AgjuUgt+3saqCrK4eaOo8sPt055AYJhZlaTH4EeD4sv7rJGm7wIDAQABo3UwczAd +BgNVHQ4EFgQUPS1LXZMqgQvH/zQHHzgTzrd7PIIwHwYDVR0jBBgwFoAUPS1LXZMq +gQvH/zQHHzgTzrd7PIIwDAYDVR0TBAUwAwEB/zAjBgNVHREEHDAaggphcGlzaXgu +ZGV2ggwqLmFwaXNpeC5kZXYwDQYJKoZIhvcNAQELBQADggIBAMlwNS8uo3JkkshI +rpYobdjCZfr74PBl+LhoihvzHs25/in3+CxETRA8cYo5pRotqdA63po3wiCCPs6a +mZiELQxyGHhFcqoYxnoURR4nyogRZLA6jjLGkbG4H+CA4ApmZmvGnP3X5uQW4v5q +IdqIXL3BvoUBln8GMEC7Rz5SGUjWG03JPkl6MdeziFyHkwdBCOrtK5m7icRncvq+ 
+iL8CMUx024LLI6A5hTBPwfVfgbWJTSv7tEu85q54ZZoYQhiD8dde4D7g5/noPvXM +ZyA9C3Sl981+pUhhazad9j9k8DCcqf9e8yH9lPY26tjiEcShv4YnwbErWzJU1F9s +ZI5Z6nj5PU66upnBWAWV7fWCOrlouB4GjNaznSNrmpn4Bb2+FinDK3t4AfWDPS5s +ljQBGQNXOd30DC7BdNAF5dQAUhVfz1EgQGqYa+frMQLiv8rNMs7h6gKQEqU+jC/1 +jbGe4/iwc0UeTtSgTPHMofqjqc99/R/ZqtJ3qFPJmoWpyu0NlNINw2KWRQaMoGLo +WgDCS0YA5/hNXVFcWnZ73jY62yrVSoj+sFbkUpGWhEFnO+uSmBv8uwY3UeCOQDih +X7Yazs3TZRqEPU+25QATf0kbxyzlWbGkwvyRD8x+n3ZHs5Ilhrc6jWHqM/S3ir7i +m9GcWiwg++EbusQsqs3w3uKAHAdT +-----END CERTIFICATE----- diff --git a/conf/cert/apisix_admin_ssl.key b/conf/cert/apisix_admin_ssl.key new file mode 100644 index 0000000000000..ec889056ffb63 --- /dev/null +++ b/conf/cert/apisix_admin_ssl.key @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKQIBAAKCAgEA0L3knaZR+PS6/i7C1qRiWzUYX9hJ9FeErhSqcxhT+5l2iO6z +J6XR9wVFWEt47jIDznTYeoaOAXkmxmRn1N5RtTcCDl4jzQNZq01hjRlfe3KyQ1WX +lk7g6ajRyeAE65xekZ0W4NHp/K41bXHuiwTKwyVchm4vVQKgVD6wps0/5dFdYzy+ +ijUjfJkKIJWnaybs1Gp34VjPSvccjPv0lLdTDVGBSzbFR2rD87Iuf2A0QYsLs07Y +k+ejusoHtpb63Txb1C6wsZRrXsr4baxScKozf/2LZ0BaKDUdAlwX17NGu1hoDI2s +Lqbj9SSomLfShHBh1hmQ3i37BhioEPRvj2j+MvoB4y6+cUL8LevKLaaGnxsvgaTq +B2WRaCwacU7SSgN6p8MmYPc6+5a7aq/AiEjv0hMoesZADlM2+8nNpx7vVKa7s1SE +e8SO6Y3O39k+lsglcuoBWEMMxS8JDf9zvmSZIqMV7J8GmFg4+tZN3faM1Q5FZE7s +ZjSY11fE2obRkQB6NP7h5qRU3GkHw3+aPolouM/QeZIHwPwNV1hUJQh9ZJd5hyoO +yTQjfahhfKxvmM3Yi0H6OTqiUr8xh0V23RiNiUHv88iR7DDu3dAIcF7jzlhk0wDg +zj5WgQsTQQII7lILft7GqgqyuHmjqPLD7dOeQGCYWZWkx+BHg+LL+6yRpu8CAwEA +AQKCAgBNsbBLAWHXYPfMrgj1LUAypIOLAQ0dtgl7ZdO/fRmdNxSIiRgDtNN+tuaF +o6nCNrl1+cWtbTGj2L0W8L442/rbkTrhsCZxI0MX4HhjtUL1xs4VA+GlH3zVW3Gi +SxBpxczpM+gVC+ykkQ7vyo04DzONCPX0T0Ssxop4cND9dL3Iw3GYAz8EYBzyPmAn +mqwy1M0nju1J4e1eALYOv6TcSZPPDDwsi5lIKLQAm5x06pDoqGFVfw5blsc5OgM+ +8dkzyUiApFQ99Hk2UiO/ZnlU1/TNOcjOSISGHKbMfwycy2yTRKeNrJmez51fXCKo +nRrtEotHzkI+gCzDqx+7F9ACN9kM4f4JO5ca0/My6tCY+mH8TA/nVzMnUpL7329w +NobuNTpyA6x5nmB3QqElrzQCRtTj7Nw5ytMdRbByJhXww9C5tajUysdq8oGoZdz5 
+94kXr6qCC5Qm3CkgyF2RjqZyg9tHUEEdaFKouHgziiqG9P2Nk1SHk7Jd7bF4rleI +i93u/f0fdVK7aMksofgUbOmfhnS+o1NxerVcbdX+E/iv6yfkrYDb46y3//4dcpwk +TeUEMCjc7ShwvYPq350q3jmzgwxeTK8ZdXwJymdJ7MaGcnMXPqd9A43evYM6nG6f +i3l2tYhH4cp6misGChnGORR68qsRkY8ssvSFNFzjcFHhnPyoCQKCAQEA8isIC1IJ +Iq9kB4mDVh0QdiuoBneNOEHy/8fASeZsqedu0OZPyoXU96iOhXuqf8sQ33ydvPef +iRwasLLkgw8sDeWILUjS36ZzwGP2QNxWfrapCFS8VfKl7hTPMVp0Wzxh8qqpGLSh +O0W7EEAJCgzzULagfupaO0Chmb3LZqXRp8m5oubnmE+9z0b5GrCIT1S8Yay2mEw9 +jxqZJGBhV7QnupyC2DIxLXlGmQk7Qs1+1mCCFwyfugHXclWYa+fet/79SkkADK0/ +ysxfy+FdZgGT/Ba5odsEpt1zH+tw4WXioJsX9mU3zAHbpPqtcfuVU+2xyKfQYrRG +NSm9MMNmart0wwKCAQEA3Koaj/0gNxLLslLIES50KmmagzU8CkEmCa/WLoVy02xr +qp42hvj+PzBTf3rIno3KEpRhMmnAtswozbV3P4l/VSZdfY+pwWsx7/5+Cf1R9nAP +vp6YCjGcLcbASazYNOWf0FRInt3pxdgT9DWjJDi99FGKA+UbI2yxHwzE+cE8r9Od +Iy42uhzCjJBqdg+an+q63k6yrOwv18KP69LlU/4vknhw4g3WxF4yTwVmXU8WKmux +aOrJv2ED8pfA7k+zwv0rPyN+F2nOySxoChaFfeu6ntBCX7zK/nV0DsMQImOycfzO +yN8WB9lRZTJVzU2r6PaGAI359uLHEmURy0069g+yZQKCAQAbECwJ99UFh0xKe1eu +G/lm+2H/twSVMOmTJCOdHp8uLar4tYRdQa+XLcMfr75SIcN09lw6bgHqNLXW4Wcg +LmXh97DMPsMyM0vkSEeQ4A7agldJkw6pHEDm5nRxM4alW44mrGPRWv5ZvWU2X7Gi +6eeXMZGmHVKQJJzqrYc5pXZUpfqU9fET2HWB4JCeJvRUyUd0MvUE+CA5CePraMn4 +Hy4BcNQ+jP1p84+sMpfo00ZFduuS39pJ00LciCxMgtElBt4PmzDiOcpTQ5vBESJ6 +79o15eRA7lUKwNzIyGsJBXXaNPrskks2BU8ilNElV9RMWNfxcK+dGEBwWIXIGU4s +x145AoIBAQCst9R8udNaaDLaTGNe126DuA8B/kwVdrLwSBqsZTXgeO+5J4dklEZl +bU0d7hxTxoXRjySZEh+OtTSG9y/0oonxO0tYOXfU9jOrNxaueQKLk2EvgfFdoUEu +r2/Y+xpsJQO3TBFfkDEn856Cuu0MMAG214/gxpY8XxowRI11NCRtN4S6gbTCbjp1 +TaCW8lXEMDW+Rfki0ugLyLVgD74CxWW1DuLEfbKKF3TnV0GtbXbbE1pU1dm+G5C8 +dL3FissYp5MPI5fRebcqzcBNjR1F15pGLpqVVy/IhmSmHVZmpISLJicxITScRiSo +wgJY5R/XBAcVLgvmi9Dn/AY2jCfHa7flAoIBAQCbnZ6ivZg81g6/X9qdo9J61hX0 +Y7Fn7bLvcs1L0ARGTsfXMvegA806XyZThqjpY47nHpQtoz4z62kiTTsdpAZUeA3z +9HUWr0b3YEpsvZpgyMNHgwq1vRDPjw4AWz0pBoDWMxx8Ck5nP1A//c1zyu9pgYEU +R+OutDeCJ+0VAc6JSH9WMA08utGPGs3t02Zhtyt2sszE9vzz4hTi5340/AYG72p7 +YGlikUxvbyylYh9wR4YUYa/klikvKLHEML1P0BCr8Vex+wLSGS1h1F5tW1Xr2CZQ 
+dVxFmfGmPDmwWbCQR6Rvt6FHRwNMpMrLr011h2RBcHBpdQl7XpUENDoopIh0 +-----END RSA PRIVATE KEY----- diff --git a/conf/cert/openssl-test2.conf b/conf/cert/openssl-test2.conf new file mode 100644 index 0000000000000..1e5beec911dff --- /dev/null +++ b/conf/cert/openssl-test2.conf @@ -0,0 +1,40 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +[req] +distinguished_name = req_distinguished_name +x509_extensions = v3_req +prompt = no + +[req_distinguished_name] +C = CN +ST = GuangDong +L = ZhuHai +O = iresty +CN = test2.com + +[v3_req] +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid,issuer +basicConstraints = CA:TRUE +subjectAltName = @alt_names + +[alt_names] +DNS.1 = test2.com +DNS.2 = *.test2.com + +## openssl genrsa -out test2.key 3072 +## openssl req -new -x509 -key test2.key -sha256 -config openssl-test2.conf -out test2.crt -days 36500 diff --git a/conf/cert/test2.crt b/conf/cert/test2.crt new file mode 100644 index 0000000000000..922a8f8b6896f --- /dev/null +++ b/conf/cert/test2.crt @@ -0,0 +1,28 @@ +-----BEGIN CERTIFICATE----- +MIIEsTCCAxmgAwIBAgIUMbgUUCYHkuKDaPy0bzZowlK0JG4wDQYJKoZIhvcNAQEL +BQAwVzELMAkGA1UEBhMCQ04xEjAQBgNVBAgMCUd1YW5nRG9uZzEPMA0GA1UEBwwG +Wmh1SGFpMQ8wDQYDVQQKDAZpcmVzdHkxEjAQBgNVBAMMCXRlc3QyLmNvbTAgFw0y +MDA0MDQyMjE3NTJaGA8yMTIwMDMxMTIyMTc1MlowVzELMAkGA1UEBhMCQ04xEjAQ +BgNVBAgMCUd1YW5nRG9uZzEPMA0GA1UEBwwGWmh1SGFpMQ8wDQYDVQQKDAZpcmVz +dHkxEjAQBgNVBAMMCXRlc3QyLmNvbTCCAaIwDQYJKoZIhvcNAQEBBQADggGPADCC +AYoCggGBAMQGBk35V3zaNVDWzEzVGd+EkZnUOrRpXQg5mmcnoKnrQ5rQQMsQCbMO +gFvLt/9OEZQmbE2HuEKsPzL79Yjdu8rGjSoQdbJZ9ccO32uvln1gn68iK79o7Tvm +TCi+BayyNA+lo9IxrBm1wGBkOU1ZPasGYzgBAbMLTSDps1EYxNR8t4l9PrTTRsh6 +NZyTYoDeVIsKZ9SckpjWVnxHOkF+AzZzIJJSe2pj572TDLYA/Xw9I4X3L+SHzwTl +iGWNXb2tU367LHERHvensQzdle7mQN2kE5GpB7QPWB+t9V4mn30jc/LyDvOaei6L ++pbl5CriGBTjaR80oXhK765K720BQeKUezri15bQlMaUGQRnzr53ZsqA4PEh6WCX +hUT2ibO32+uZFXzVQw8y/JUkPf76pZagi8DoLV+sfSbUtnpbQ8wyV2qqTM2eCuPi +RgUwXQi2WssKKzrqcgKil3vksHZozLtOmyZiNE4qfNxv+UGoIybJtZmB+9spY0Rw +5zBRuULycQIDAQABo3MwcTAdBgNVHQ4EFgQUCmZefzpizPrb3VbiIDhrA48ypB8w +HwYDVR0jBBgwFoAUCmZefzpizPrb3VbiIDhrA48ypB8wDAYDVR0TBAUwAwEB/zAh +BgNVHREEGjAYggl0ZXN0Mi5jb22CCyoudGVzdDIuY29tMA0GCSqGSIb3DQEBCwUA +A4IBgQA0nRTv1zm1ACugJFfYZfxZ0mLJfRUCFMmFfhy+vGiIu6QtnOFVw/tEOyMa +m78lBiqac15n3YWYiHiC5NFffTZ7XVlOjN2i4x2z2IJsHNa8tU80AX0Q/pizGK/d 
++dzlcsGBb9MGT18h/B3/EYQFKLjUsr0zvDb1T0YDlRUsN3Bq6CvZmvfe9F7Yh4Z/ +XO5R+rX8w9c9A2jzM5isBw2qp/Ggn5RQodMwApEYkJdu80MuxaY6s3dssS4Ay8wP +VNFEeLcdauJ00ES1OnbnuNiYSiSMOgWBsnR+c8AaSRB/OZLYQQKGGYbq0tspwRjM +MGJRrI/jdKnvJQ8p02abdvA9ZuFChoD3Wg03qQ6bna68ZKPd9peBPpMrDDGDLkGI +NzZ6bLJKILnQkV6b1OHVnPDsKXfXjUTTNK/QLJejTXu9RpMBakYZMzs/SOSDtFlS +A+q25t6+46nvA8msUSBKyOGBX42mJcKvR4OgG44PfDjYfmjn2l+Dz/jNXDclpb+Q +XAzBnfM= +-----END CERTIFICATE----- diff --git a/conf/cert/test2.key b/conf/cert/test2.key new file mode 100644 index 0000000000000..c25d4e5bde9e4 --- /dev/null +++ b/conf/cert/test2.key @@ -0,0 +1,39 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIG5QIBAAKCAYEAxAYGTflXfNo1UNbMTNUZ34SRmdQ6tGldCDmaZyegqetDmtBA +yxAJsw6AW8u3/04RlCZsTYe4Qqw/Mvv1iN27ysaNKhB1sln1xw7fa6+WfWCfryIr +v2jtO+ZMKL4FrLI0D6Wj0jGsGbXAYGQ5TVk9qwZjOAEBswtNIOmzURjE1Hy3iX0+ +tNNGyHo1nJNigN5Uiwpn1JySmNZWfEc6QX4DNnMgklJ7amPnvZMMtgD9fD0jhfcv +5IfPBOWIZY1dva1TfrsscREe96exDN2V7uZA3aQTkakHtA9YH631XiaffSNz8vIO +85p6Lov6luXkKuIYFONpHzSheErvrkrvbQFB4pR7OuLXltCUxpQZBGfOvndmyoDg +8SHpYJeFRPaJs7fb65kVfNVDDzL8lSQ9/vqllqCLwOgtX6x9JtS2eltDzDJXaqpM +zZ4K4+JGBTBdCLZayworOupyAqKXe+SwdmjMu06bJmI0Tip83G/5QagjJsm1mYH7 +2yljRHDnMFG5QvJxAgMBAAECggGBAIELlkruwvGmlULKpWRPReEn3NJwLNVoJ56q +jUMri1FRWAgq4PzNahU+jrHfwxmHw3rMcK/5kQwTaOefh1y63E35uCThARqQroSE +/gBeb6vKWFVrIXG5GbQ9QBXyQroV9r/2Q4q0uJ+UTzklwbNx9G8KnXbY8s1zuyrX +rvzMWYepMwqIMSfJjuebzH9vZ4F+3BlMmF4XVUrYj8bw/SDwXB0UXXT2Z9j6PC1J +CS0oKbgIZ8JhoF3KKjcHBGwWTIf5+byRxeG+z99PBEBafm1Puw1vLfOjD3DN/fso +8xCEtD9pBPBJ+W97x/U+10oKetmP1VVEr2Ph8+s2VH1zsRF5jo5d0GtvJqOwIQJ7 +z3OHJ7lLODw0KAjB1NRXW4dTTUDm6EUuUMWFkGAV6YTyhNLAT0DyrUFJck9RiY48 +3QN8vSf3n/+3wwg1gzcJ9w3W4DUbvGqu86CaUQ4UegfYJlusY/3YGp5bGNQdxmws +lgIoSRrHp6UJKsP8Yl08MIvT/oNLgQKBwQD75SuDeyE0ukhEp0t6v+22d18hfSef +q3lLWMI1SQR9Kiem9Z1KdRkIVY8ZAHANm6D8wgjOODT4QZtiqJd2BJn3Xf+aLfCd +CW0hPvmGTcp/E4sDZ2u0HbIrUStz7ZcgXpjD2JJAJGEKY2Z7J65gnTqbqoBDrw1q +1+FqtikkHRte1UqxjwnWBpSdoRQFgNPHxPWffhML1xsD9Pk1B1b7JoakYcKsNoQM 
+oXUKPLxSZEtd0hIydqmhGYTa9QWBPNDlA5UCgcEAxzfGbOrPBAOOYZd3jORXQI6p +H7SddTHMQyG04i+OWUd0HZFkK7/k6r26GFmImNIsQMB26H+5XoKRFKn+sUl14xHY +FwB140j0XSav2XzT38UpJ9CptbgK1eKGQVp41xwRYjHVScE5hJuA3a1TKM0l26rp +hny/KaP+tXuqt9QbxcUN6efubNYyFP+m6nq2/XdX74bJuGpXLq8W0oFdiocO6tmF +4/Hsc4dCVrcwULqXQa0lJ57zZpfIPARqWM2847xtAoHBANVUNbDpg6rTJMc34722 +dAy3NhL3mqooH9aG+hsEls+l9uT4WFipqSScyU8ERuHPbt0BO1Hi2kFx1rYMUBG8 +PeT4b7NUutVUGV8xpUNv+FH87Bta6CUnjTAQUzuf+QCJ/NjIPrwh0yloG2+roIvk +PLF/CZfI1hUpdZfZZChYmkiLXPHZURw4gH6q33j1rOYf0WFc9aZua0vDmZame6zB +6P+oZ6VPmi/UQXoFC/y/QfDYK18fjfOI2DJTlnDoX4XErQKBwGc3M5xMz/MRcJyJ +oIwj5jzxbRibOJV2tpD1jsU9xG/nQHbtVEwCgTVKFXf2M3qSMhFeZn0xZ7ZayZY+ +OVJbcDO0lBPezjVzIAB/Qc7aCOBAQ4F4b+VRtHN6iPqlSESTK0KH9Szgas+UzeCM +o7BZEctNMu7WBSkq6ZXXu+zAfZ8q6HmPDA3hsFMG3dFQwSxzv+C/IhZlKkRqvNVV +50QVk5oEF4WxW0PECY/qG6NH+YQylDSB+zPlYf4Of5cBCWOoxQKBwQCeo37JpEAR +kYtqSjXkC5GpPTz8KR9lCY4SDuC1XoSVCP0Tk23GX6GGyEf4JWE+fb/gPEFx4Riu +7pvxRwq+F3LaAa/FFTNUpY1+8UuiMO7J0B1RkVXkyJjFUF/aQxAnOoZPmzrdZhWy +bpe2Ka+JS/aXSd1WRN1nmo/DarpWFvdLWZFwUt6zMziH40o1gyPHEuXOqVtf2QCe +Q6WC9xnEz4lbb/fR2TF9QRA4FtoRpDe/f3ZGIpWE0RdwyZZ6uA7T1+Q= +-----END RSA PRIVATE KEY----- diff --git a/conf/config.yaml b/conf/config.yaml index 8f691d59253b2..eba48dfa992c8 100644 --- a/conf/config.yaml +++ b/conf/config.yaml @@ -54,6 +54,8 @@ apisix: # - 127.0.0.0/24 # If we don't set any IP list, then any IP access is allowed by default. # - "::/64" # port_admin: 9180 # use a separate port + # https_admin: true # enable HTTPS when use a separate port for Admin API. + # Admin API will use conf/apisix_admin_api.crt and conf/apisix_admin_api.key as certificate. # Default token when use API to call for Admin API. # *NOTE*: Highly recommended to modify this value to protect APISIX's Admin API. 
@@ -89,9 +91,12 @@ apisix: enable: true enable_http2: true listen_port: 9443 - ssl_protocols: "TLSv1 TLSv1.1 TLSv1.2 TLSv1.3" - ssl_ciphers: "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA256:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA" - + ssl_protocols: "TLSv1.2 TLSv1.3" + ssl_ciphers: "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384" + key_encrypt_salt: "edd1c9f0985e76a2" # If not set, will save origin ssl key into etcd. + # If set this, must be a string of length 16. And it will encrypt ssl key with AES-128-CBC + # !!! So do not change it after saving your ssl, it can't decrypt the ssl keys have be saved if you change !! +# discovery: eureka # service discovery center nginx_config: # config for render the template to genarate nginx.conf error_log: "logs/error.log" error_log_level: "warn" # warn,error @@ -116,7 +121,19 @@ etcd: host: # it's possible to define multiple etcd hosts addresses of the same etcd cluster. - "http://etcd:2379" # multiple etcd address prefix: "/apisix" # apisix configurations prefix - timeout: 3 # 3 seconds + timeout: 30 # 3 seconds + # user: root # root username for etcd + # password: 5tHkHhYkjr6cQY # root password for etcd +#eureka: +# host: # it's possible to define multiple eureka hosts addresses of the same eureka cluster. 
+# - "http://127.0.0.1:8761" +# prefix: "/eureka/" +# fetch_interval: 30 # default 30s +# weight: 100 # default weight for node +# timeout: +# connect: 2000 # default 2000ms +# send: 2000 # default 2000ms +# read: 5000 # default 5000ms plugins: # plugin list - example-plugin @@ -145,6 +162,14 @@ plugins: # plugin list - proxy-mirror - kafka-logger - cors + - consumer-restriction + - syslog - batch-requests + - http-logger + - skywalking + - echo + - authz-keycloak + - uri-blocker + stream_plugins: - mqtt-proxy diff --git a/dashboard b/dashboard index cfb3ee7b87210..329b092dcaa7a 160000 --- a/dashboard +++ b/dashboard @@ -1 +1 @@ -Subproject commit cfb3ee7b8721076975c1deaff3e52da3ea4a312a +Subproject commit 329b092dcaa7a505dcdec86c667b6803f5863d94 diff --git a/doc/README.md b/doc/README.md index 238e1983cb1f1..c9a8f95b41f64 100644 --- a/doc/README.md +++ b/doc/README.md @@ -16,19 +16,19 @@ # limitations under the License. # --> -[Chinese](README_CN.md) + +[Chinese](./zh-cn/README.md) Reference Documentation ================== -* [APISIX Readme](../README.md) +* [APISIX Readme](./README.md) * [Architecture Design](architecture-design.md) * [Benchmark](benchmark.md) * [Getting Started Guide](getting-started.md) * [How to build Apache APISIX](how-to-build.md) * [Health Check](health-check.md): Enable health check on the upstream node, and will automatically filter unhealthy nodes during load balancing to ensure system stability. -* Router - * [radixtree](router-radixtree.md) +* [Router radixtree](router-radixtree.md) * [Stand Alone Model](stand-alone.md): Supports to load route rules from local yaml file, it is more friendly such as under the kubernetes(k8s). * [Stream Proxy](stream-proxy.md) * [Admin API](admin-api.md) @@ -51,7 +51,7 @@ Plugins * [proxy-rewrite](plugins/proxy-rewrite.md): Rewrite upstream request information. 
* [prometheus](plugins/prometheus.md): Expose metrics related to APISIX and proxied upstream services in Prometheus exposition format, which can be scraped by a Prometheus Server. * [OpenTracing](plugins/zipkin.md): Supports Zikpin and Apache SkyWalking. -* [grpc-transcode](plugins/grpc-transcoding.md): REST <--> gRPC transcoding. +* [grpc-transcode](plugins/grpc-transcode.md): REST <--> gRPC transcoding. * [serverless](plugins/serverless.md):Allows to dynamically run Lua code at *different* phase in APISIX. * [ip-restriction](plugins/ip-restriction.md): IP whitelist/blacklist. * [openid-connect](plugins/oauth.md) @@ -65,11 +65,20 @@ Plugins * [kafka-logger](plugins/kafka-logger.md): Log requests to External Kafka servers. * [cors](plugins/cors.md): Enable CORS(Cross-origin resource sharing) for your API. * [batch-requests](plugins/batch-requests.md): Allow you send mutiple http api via **http pipeline**. +* [authz-keycloak](plugins/authz-keycloak.md): Authorization with Keycloak Identity Server. +* [uri-blocker](plugins/uri-blocker.md): Block client request by URI. +* [oauth](plugins/oauth.md): Provides OAuth 2 authentication and introspection. -Deploy to the Cloud +Deploy ======= + ### AWS The recommended approach is to deploy APISIX with [AWS CDK](https://aws.amazon.com/cdk/) on [AWS Fargate](https://aws.amazon.com/fargate/) which helps you decouple the APISIX layer and the upstream layer on top of a fully-managed and secure serverless container compute environment with autoscaling capabilities. See [this guide](https://github.com/pahud/cdk-samples/blob/master/typescript/apisix/README.md) by [Pahud Hsieh](https://github.com/pahud) and learn how to provision the recommended architecture 100% in AWS CDK. + +### Kubernetes + +See [this guide](../kubernetes/README.md) and learn how to deploy apisix in Kubernetes. 
+ diff --git a/doc/README_CN.md b/doc/README_CN.md deleted file mode 100644 index 1fc08c5abccd9..0000000000000 --- a/doc/README_CN.md +++ /dev/null @@ -1,68 +0,0 @@ - -[English](README.md) - -Reference document -================== - -* [APISIX 说明](../README_CN.md) -* [架构设计](architecture-design-cn.md) -* [压力测试](benchmark-cn.md) -* [如何构建 Apache APISIX](how-to-build-cn.md) -* [健康检查](health-check.md): 支持对上游节点的主动和被动健康检查,在负载均衡时自动过滤掉不健康的节点。 -* Router(路由) - * [radixtree](router-radixtree.md) - * [r3](router-r3.md) -* [独立运行模型](stand-alone-cn.md): 支持从本地 yaml 格式的配置文件启动,更适合 Kubernetes(k8s) 体系。 -* [TCP/UDP 动态代理](stream-proxy-cn.md) -* [管理 API](admin-api-cn.md) -* [变更日志](../CHANGELOG_CN.md) -* [代码风格](../CODE_STYLE.md) -* [常见问答](../FAQ_CN.md) - -插件 -=== - -* [插件热加载](plugins-cn.md):无需重启服务,完成插件热加载或卸载。 -* [HTTPS](https-cn.md):根据 TLS 扩展字段 SNI(Server Name Indication) 动态加载证书。 -* [动态负载均衡](architecture-design-cn.md#upstream):跨多个上游服务的动态负载均衡,目前已支持 round-robin 和一致性哈希算法。 -* [key-auth](plugins/key-auth-cn.md):基于 Key Authentication 的用户认证。 -* [JWT-auth](plugins/jwt-auth-cn.md):基于 [JWT](https://jwt.io/) (JSON Web Tokens) Authentication 的用户认证。 -* [basic-auth](plugins/basic-auth-cn.md):基于 basic auth 的用户认证。 -* [wolf-rbac](plugins/wolf-rbac-cn.md) 基于 *RBAC* 的用户认证及授权。 -* [limit-count](plugins/limit-count-cn.md):基于“固定窗口”的限速实现。 -* [limit-req](plugins/limit-req-cn.md):基于漏桶原理的请求限速实现。 -* [limit-conn](plugins/limit-conn-cn.md):限制并发请求(或并发连接)。 -* [proxy-rewrite](plugins/proxy-rewrite-cn.md): 支持自定义修改 proxy 到上游的信息。 -* [prometheus](plugins/prometheus-cn.md):以 Prometheus 格式导出 APISIX 自身的状态信息,方便被外部 Prometheus 服务抓取。 -* [OpenTracing](plugins/zipkin-cn.md):支持 Zikpin 和 Apache SkyWalking。 -* [grpc-transcode](plugins/grpc-transcoding-cn.md):REST <--> gRPC 转码。 -* [serverless](plugins/serverless-cn.md):允许在 APISIX 中的不同阶段动态运行 Lua 代码。 -* [ip-restriction](plugins/ip-restriction-cn.md): IP 黑白名单。 -* [openid-connect](plugins/oauth.md) -* [redirect](plugins/redirect-cn.md): URI 重定向。 -* 
[response-rewrite](plugins/response-rewrite-cn.md): 支持自定义修改返回内容的 `status code`、`body`、`headers`。 -* [fault-injection](plugins/fault-injection-cn.md):故障注入,可以返回指定的响应体、响应码和响应时间,从而提供了不同的失败场景下处理的能力,例如服务失败、服务过载、服务高延时等。 -* [proxy-cache](plugins/proxy-cache-cn.md):代理缓存插件提供缓存后端响应数据的能力。 -* [proxy-mirror](plugins/proxy-mirror-cn.md):代理镜像插件提供镜像客户端请求的能力。 -* [udp-logger](plugins/udp-logger.md): 将请求记录到UDP服务器 -* [tcp-logger](plugins/tcp-logger.md): 将请求记录到TCP服务器 -* [kafka-logger](plugins/kafka-logger-cn.md): 将请求记录到外部Kafka服务器。 -* [cors](plugins/cors-cn.md): 为你的API启用CORS. -* [batch-requests](plugins/batch-requests-cn.md): 以 **http pipeline** 的方式在网关一次性发起多个 `http` 请求。 diff --git a/doc/_navbar.md b/doc/_navbar.md new file mode 100644 index 0000000000000..1612e7d8a96ec --- /dev/null +++ b/doc/_navbar.md @@ -0,0 +1,22 @@ + + +- Translations + - [:uk: English](/) + - [:cn: 中文](/zh-cn/) diff --git a/doc/_sidebar.md b/doc/_sidebar.md new file mode 100644 index 0000000000000..6a9c1e5511679 --- /dev/null +++ b/doc/_sidebar.md @@ -0,0 +1,103 @@ + + +- Getting started + + - [Introduction](README.md) + - [Quick start](getting-started.md) + +- General + + - [Architecture](architecture-design.md) + + - [Benchmark](benchmark.md) + + - Installation + + - [How to build](how-to-build.md) + - [Install Dependencies](install-dependencies.md) + + - [HTTPS](https.md) + + - [Router](router-radixtree.md) + + - Plugins + + - [Develop Plugins](plugin-develop.md) + - [Hot Reload](plugins.md) + + - Proxy Modes + + - [GRPC Proxy](grpc-proxy.md) + - [Stream Proxy](stream-proxy.md) + +- Plugins + + - Authentication + + - [Key Auth](plugins/key-auth.md) + - [Basic Auth](plugins/basic-auth.md) + - [JWT Auth](plugins/jwt-auth.md) + - [Opend ID Connect](plugins/oauth.md) + + - General + + - [Redirect](plugins/redirect.md) + - [Serverless](plugins/serverless.md) + - [Batch Request](plugins/batch-requests.md) + - [Fault Injection](plugins/fault-injection.md) + - [MQTT Proxy](plugins/mqtt-proxy.md) + - [Proxy 
Cache](plugins/proxy-cache.md) + - [Proxy Mirror](plugins/proxy-mirror.md) + - [Echo](plugins/echo.md) + + - Transformations + + - [Response Rewrite](plugins/response-rewrite.md) + - [Proxy Rewrite](plugins/proxy-rewrite.md) + - [GRPC Transcoding](plugins/grpc-transcode.md) + + - Security + + - [Consumer Restriction](plugins/consumer-restriction.md) + - [Limit Connection](plugins/limit-conn.md) + - [Limit Count](plugins/limit-count.md) + - [Limit Request](plugins/limit-req.md) + - [CORS](plugins/cors.md) + - [IP Restriction](plugins/ip-restriction.md) + - [Keycloak Authorization](plugins/authz-keycloak.md) + - [RBAC Wolf](plugins/wolf-rbac.md) + + - Monitoring + + - [Prometheus](plugins/prometheus.md) + - [SKywalking](plugins/skywalking.md) + - [Zipkin](plugins/zipkin.md) + + - Loggers + + - [HTTP Logger](plugins/http-logger.md) + - [Kafka Logger](plugins/kafka-logger.md) + - [Syslog](plugins/syslog.md) + - [TCP Logger](plugins/tcp-logger.md) + - [UDP Logger](plugins/udp-logger.md) + +- Admin API + + - [Admin API](admin-api.md) diff --git a/doc/admin-api.md b/doc/admin-api.md index f60ccb6aef0ea..b1112e9ab2385 100644 --- a/doc/admin-api.md +++ b/doc/admin-api.md @@ -19,8 +19,6 @@ # Table of Contents -=== - * [Route](#route) * [Service](#service) * [Consumer](#consumer) @@ -41,7 +39,7 @@ |PUT |/apisix/admin/routes/{id}|{...}|Create resource by ID| |POST |/apisix/admin/routes |{...}|Create resource, and ID is generated by server| |DELETE |/apisix/admin/routes/{id}|NULL|Remove resource| -|PATCH |/apisix/admin/routes/{id}/{path}|{...}|Update targeted content| +|PATCH |/apisix/admin/routes/{id}|{...}|Update targeted content, if you want to remove an attribute, set the attribute value to null to remove| > URI Request Parameters: @@ -53,7 +51,8 @@ |Parameter |Required |Type |Description |Example| |---------|---------|----|-----------|----| -|desc |False |Auxiliary |Identifies route names, usage scenarios, and more.|customer xxxx| +|name |False |Auxiliary |Identifies route 
names.|customer-xxxx| +|desc |False |Auxiliary |route description, usage scenarios, and more.|customer xxxx| |uri |True |Match Rules|In addition to full matching such as `/foo/bar`、`/foo/gloo`, using different [Router](architecture-design.md#router) allows more advanced matching, see [Router](architecture-design.md#router) for more.|"/hello"| |host |False |Match Rules|Currently requesting a domain name, such as `foo.com`; pan-domain names such as `*.foo.com` are also supported.|"foo.com"| |hosts |False |Match Rules|The `host` in the form of a list means that multiple different hosts are allowed, and match any one of them.|{"foo.com", "*.bar.com"}| @@ -61,7 +60,7 @@ |remote_addrs|False |Match Rules|The `remote_addr` in the form of a list indicates that multiple different IP addresses are allowed, and match any one of them.|{"127.0.0.1", "192.0.0.0/8", "::1"}| |methods |False |Match Rules|If empty or without this option, there are no `method` restrictions, and it can be a combination of one or more: `GET`,`POST`,`PUT`,`DELETE`,`PATCH`, `HEAD`,`OPTIONS`,`CONNECT`,`TRACE`.|{"GET", "POST"}| |priority |False |Match Rules|If different routes contain the same `uri`, determine which route is matched first based on the attribute` priority`. Larger value means higher priority. The default value is 0.|priority = 10| -|vars |False |Match Rules |A list of one or more `{var, operator, val}` elements, like this: `{{var, operator, val}, {var, operator, val}, ...}`. For example: `{"arg_name", "==", "json"}` means that the current request parameter `name` is `json`. The `var` here is consistent with the internal variable name of Nginx, so you can also use `request_uri`, `host`, etc. For the operator part, the currently supported operators are `==`, `~=`,`>`, `<`, and `~~`. For the `>` and `<` operators, the result is first converted to `number` and then compared. 
See a list of [supported operators](#available-operators) |{{"arg_name", "==", "json"}, {"arg_age", ">", 18}}| +|vars |False |Match Rules |A list of one or more `{var, operator, val}` elements, like this: `{{var, operator, val}, {var, operator, val}, ...}}`. For example: `{"arg_name", "==", "json"}` means that the current request parameter `name` is `json`. The `var` here is consistent with the internal variable name of Nginx, so you can also use `request_uri`, `host`, etc. For the operator part, the currently supported operators are `==`, `~=`,`>`, `<`, and `~~`. For the `>` and `<` operators, the result is first converted to `number` and then compared. See a list of [supported operators](#available-operators) |{{"arg_name", "==", "json"}, {"arg_age", ">", 18}}| |filter_func|False|Match Rules|User-defined filtering function. You can use it to achieve matching requirements for special scenarios. This function accepts an input parameter named `vars` by default, which you can use to get Nginx variables.|function(vars) return vars["arg_name"] == "json" end| |plugins |False |Plugin|See [Plugin](architecture-design.md#plugin) for more || |upstream |False |Upstream|Enabled Upstream configuration, see [Upstream](architecture-design.md#upstream) for more|| @@ -83,6 +82,7 @@ Config Example: "hosts": ["a.com","b.com"], # A set of host. Host and hosts only need to be non-empty one. "plugins": {}, # Bound plugin "priority": 0, # If different routes contain the same `uri`, determine which route is matched first based on the attribute` priority`, the default value is 0. + "name": "route-xxx", "desc": "hello world", "remote_addr": "127.0.0.1", # Client IP "remote_addrs": ["127.0.0.1"], # A set of Client IP. Remote_addr and remo-te_addrs only need to be non-empty one. 
@@ -183,7 +183,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 |PUT |/apisix/admin/services/{id}|{...}|Create resource by ID| |POST |/apisix/admin/services |{...}|Create resource, and ID is generated by server| |DELETE |/apisix/admin/services/{id}|NULL|Remove resource| -|PATCH |/apisix/admin/routes/{id}/{path}|{...}|Update targeted content| +|PATCH |/apisix/admin/routes/{id}|{...}|Update targeted content, if you want to remove an attribute, set the attribute value to null to remove| > Request Body Parameters: @@ -192,7 +192,8 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 |plugins |False |Plugin|See [Plugin](architecture-design.md#plugin) for more || |upstream |False |Upstream|Enabled Upstream configuration, see [Upstream](architecture-design.md#upstream) for more|| |upstream_id|False |Upstream|Enabled upstream id, see [Upstream](architecture-design.md#upstream) for more || -|desc |False |Auxiliary |Identifies route names, usage scenarios, and more.|customer xxxx| +|name |False |Auxiliary |Identifies service names.|customer-xxxx| +|desc |False |Auxiliary |service usage scenarios, and more.|customer xxxx| Config Example: @@ -202,6 +203,7 @@ Config Example: "plugins": {}, # Bound plugin "upstream_id": "1", # upstream id, recommended "upstream": {}, # upstream, not recommended + "name": "service-test", "desc": "hello world", } ``` @@ -209,7 +211,7 @@ Config Example: Example: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/services/201 -X PUT -i -d ' +$ curl http://127.0.0.1:9080/apisix/admin/services/201 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "plugins": { "limit-count": { @@ -286,7 +288,7 @@ The binding authentication and authorization plug-in is a bit special. 
When it n Example: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/consumers/2 -X PUT -i -d ' +$ curl http://127.0.0.1:9080/apisix/admin/consumers/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "username": "jack", "plugins": { @@ -328,7 +330,7 @@ Return response from etcd currently. |PUT |/apisix/admin/upstreams/{id}|{...}|Create resource by ID| |POST |/apisix/admin/upstreams |{...}|Create resource, and ID is generated by server| |DELETE |/apisix/admin/upstreams/{id}|NULL|Remove resource| -|PATCH |/apisix/admin/upstreams/{id}/{path}|{...}|Update targeted content| +|PATCH |/apisix/admin/upstreams/{id}|{...}|Update targeted content, if you want to remove an attribute, set the attribute value to null to remove| > Request Body Parameters: @@ -345,7 +347,8 @@ In addition to the basic complex equalization algorithm selection, APISIX's Upst |retries |optional|Pass the request to the next upstream using the underlying Nginx retry mechanism, the retry mechanism is enabled by default and set the number of retries according to the number of backend nodes. If `retries` option is explicitly set, it will override the default value.| |enable_websocket|optional| enable `websocket`(boolean), default `false`.| |timeout|optional| Set the timeout for connection, sending and receiving messages. 
| -|desc |optional|Identifies route names, usage scenarios, and more.| +|name |optional|Identifies upstream names| +|desc |optional|upstream usage scenarios, and more.| Config Example: @@ -371,6 +374,7 @@ Config Example: "checks": {}, # Health check parameters "hash_on": "", "key": "", + "name": "upstream-for-test", "desc": "hello world", } ``` @@ -378,15 +382,15 @@ Config Example: Example: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/upstreams/100 -i -X PUT -d ' -> { -> "type": "roundrobin", -> "nodes": { -> "127.0.0.1:80": 1, -> "127.0.0.2:80": 2, -> "foo.com:80": 3 -> } -> }' +$ curl http://127.0.0.1:9080/apisix/admin/upstreams/100 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -i -X PUT -d ' +{ + "type":"roundrobin", + "nodes":{ + "127.0.0.1:80":1, + "127.0.0.2:80":2, + "foo.com:80":3 + } +}' HTTP/1.1 201 Created Date: Thu, 26 Dec 2019 04:19:34 GMT Content-Type: text/plain diff --git a/doc/architecture-design.md b/doc/architecture-design.md index 9edaadb378b45..098808e1616d8 100644 --- a/doc/architecture-design.md +++ b/doc/architecture-design.md @@ -17,7 +17,7 @@ # --> -[Chinese](architecture-design-cn.md) +[Chinese](zh-cn/architecture-design.md) ## Table of Contents @@ -106,7 +106,7 @@ Server: APISIX web server When we receive a successful response, it indicates that the route was successfully created. -For specific options of Route, please refer to [Admin API](admin-api-cn.md#route). +For specific options of Route, please refer to [Admin API](zh-cn/admin-api.md#route). 
[Back to top](#Table-of-contents) @@ -233,8 +233,9 @@ In addition to the basic complex equalization algorithm selection, APISIX's Upst |Name |Optional|Description| |------- |-----|------| |type |required|`roundrobin` supports the weight of the load, `chash` consistency hash, pick one of them.| -|nodes |required if `k8s_deployment_info` not configured|Hash table, the key of the internal element is the upstream machine address list, the format is `Address + Port`, where the address part can be IP or domain name, such as `192.168.1.100:80`, `foo.com:80`, etc. Value is the weight of the node. In particular, when the weight value is `0`, it has a special meaning, which usually means that the upstream node is invalid and never wants to be selected.| -|k8s_deployment_info |required if `nodes` not configured|fields: `namespace`、`deploy_name`、`service_name`、`port`、`backend_type`, `port` is number, `backend_type` is `pod` or `service`, others is string. | +|nodes |required if `service_name` and `k8s_deployment_info` not configured|Hash table, the key of the internal element is the upstream machine address list, the format is `Address + Port`, where the address part can be IP or domain name, such as `192.168.1.100:80`, `foo.com:80`, etc. Value is the weight of the node. In particular, when the weight value is `0`, it has a special meaning, which usually means that the upstream node is invalid and never wants to be selected.| +|service_name |required if `nodes` and `k8s_deployment_info` not configured |The name of the upstream service and used with the registry, refer to [Integration service discovery registry](discovery.md).| +|k8s_deployment_info |required if `nodes` and `service_name` not configured|fields: `namespace`、`deploy_name`、`service_name`、`port`、`backend_type`, `port` is number, `backend_type` is `pod` or `service`, others is string. | |hash_on |optional|This option is only valid if the `type` is `chash`. 
Supported types `vars`(Nginx variables), `header`(custom header), `cookie`, `consumer`, the default value is `vars`.| |key |required|This option is only valid if the `type` is `chash`. Find the corresponding node `id` according to `hash_on` and `key`. When `hash_on` is set as `vars`, `key` is the required parameter, for now, it support nginx built-in variables like `uri, server_name, server_addr, request_uri, remote_port, remote_addr, query_string, host, hostname, arg_***`, `arg_***` is arguments in the request line, [Nginx variables list](http://nginx.org/en/docs/varindex.html). When `hash_on` is set as `header`, `key` is the required parameter, and `header name` is customized. When `hash_on` is set to `cookie`, `key` is the required parameter, and `cookie name` is customized. When `hash_on` is set to `consumer`, `key` does not need to be set. In this case, the `key` adopted by the hash algorithm is the `consumer_id` authenticated. If the specified `hash_on` and `key` can not fetch values, it will be fetch `remote_addr` by default.| |checks |optional|Configure the parameters of the health check. For details, refer to [health-check](health-check.md).| @@ -350,7 +351,7 @@ Here are some examples of configurations using different `hash_on` types: Create a consumer object: ```shell -curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ` +curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "username": "jack", "plugins": { @@ -358,7 +359,7 @@ curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f1 "key": "auth-jack" } } -}` +}' ``` Create route object and enable `key-auth` plugin authentication: @@ -536,6 +537,35 @@ HTTP/1.1 503 Service Temporarily Unavailable ``` +Use the [consumer-restriction](zh-cn/plugins/consumer-restriction.md) plug-in to restrict the access of Jack to this API. 
+ +# Add Jack to the blacklist +$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "plugins": { + "key-auth": {}, + "consumer-restriction": { + "blacklist": [ + "jack" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" +}' + +# Repeated tests, all return 403; Jack is forbidden to access this API +$ curl http://127.0.0.1:9080/hello -H 'apikey: auth-one' -I +HTTP/1.1 403 +... + +``` + [Back to top](#Table-of-contents) ## Global Rule @@ -547,6 +577,7 @@ We can register a global [Plugin](#Plugin) with `GlobalRule`: curl -X PUT \ https://{apisix_listen_address}/apisix/admin/global_rules/1 \ -H 'Content-Type: application/json' \ + -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' \ -d '{ "plugins": { "limit-count": { diff --git a/doc/benchmark.md b/doc/benchmark.md index eaf4e4cc6cdac..9c3c016d0fcff 100644 --- a/doc/benchmark.md +++ b/doc/benchmark.md @@ -17,7 +17,7 @@ # --> -[Chinese](benchmark-cn.md) +[Chinese](zh-cn/benchmark.md) ### Benchmark Environments @@ -35,13 +35,13 @@ and the response size was 1KB. The x-axis means the size of CPU core, and the y-axis is QPS. - + #### Latency Note the y-axis latency in **microsecond(μs)** not millisecond. - + #### Flame Graph @@ -80,18 +80,18 @@ and the response size was 1KB. The x-axis means the size of CPU core, and the y-axis is QPS. - + #### Latency Note the y-axis latency in **microsecond(μs)** not millisecond. - + #### Flame Graph The result of Flame Graph: -![flamegraph-2](../doc/images/flamegraph-2.jpg) +![flamegraph-2](./images/flamegraph-2.jpg) And if you want to run the benchmark test in your machine, you should run another Nginx to listen 80 port. 
diff --git a/doc/discovery.md b/doc/discovery.md new file mode 100644 index 0000000000000..96d36e8e3663c --- /dev/null +++ b/doc/discovery.md @@ -0,0 +1,244 @@ + +[Chinese](zh-cn/discovery.md) + +# Integration service discovery registry + +* [**Summary**](#Summary) +* [**How extend the discovery client?**](#how-extend-the-discovery-client) + * [**Basic steps**](#basic-steps) + * [**the example of Eureka**](#the-example-of-eureka) + * [**Implementation of eureka.lua**](#implementation-of-eurekalua) + * [**How convert Eureka's instance data to APISIX's node?**](#how-convert-eurekas-instance-data-to-apisixs-node) +* [**Configuration for discovery client**](#configuration-for-discovery-client) + * [**Select discovery client**](#select-discovery-client) + * [**Configuration for Eureka**](#configuration-for-eureka) +* [**Upstream setting**](#upstream-setting) + +## Summary + +When system traffic changes, the number of servers of the upstream service also increases or decreases, or the server needs to be replaced due to its hardware failure. If the gateway maintains upstream service information through configuration, the maintenance costs in the microservices architecture pattern are unpredictable. Furthermore, due to the untimely update of these information, will also bring a certain impact for the business, and the impact of human error operation can not be ignored. So it is very necessary for the gateway to automatically get the latest list of service instances through the service registry。As shown in the figure below: + +![](./images/discovery.png) + +1. When the service starts, it will report some of its information, such as the service name, IP, port and other information to the registry. The services communicate with the registry using a mechanism such as a heartbeat, and if the registry and the service are unable to communicate for a long time, the instance will be cancel.When the service goes offline, the registry will delete the instance information. +2. 
The gateway gets service instance information from the registry in near-real time.
+3. When the user requests the service through the gateway, the gateway selects one instance from the registry for proxy.
+
+Common registries: Eureka, Etcd, Consul, Zookeeper, Nacos etc.
+
+## How extend the discovery client?
+
+### Basic steps
+
+It is very easy for APISIX to extend the discovery client. The basic steps are as follows:
+
+1. Add the implementation of registry client in the 'apisix/discovery/' directory;
+
+2. Implement the `_M.init_worker()` function for initialization and the `_M.nodes(service_name)` function for obtaining the list of service instance nodes;
+
+3. Convert the registry data into data in APISIX;
+
+
+### The example of Eureka
+
+#### Implementation of eureka.lua
+
+First, add [`eureka.lua`](../apisix/discovery/eureka.lua) in the `apisix/discovery/` directory;
+
+Then implement the `_M.init_worker()` function for initialization and the `_M.nodes(service_name)` function for obtaining the list of service instance nodes in `eureka.lua`:
+
+ ```lua
+ local _M = {
+     version = 1.0,
+ }
+
+
+ function _M.nodes(service_name)
+     ... ...
+ end
+
+
+ function _M.init_worker()
+     ... ...
+ end
+
+
+ return _M
+ ```
+
+#### How convert Eureka's instance data to APISIX's node?
+ +Here's an example of Eureka's data: + +```json +{ + "applications": { + "application": [ + { + "name": "USER-SERVICE", # service name + "instance": [ + { + "instanceId": "192.168.1.100:8761", + "hostName": "192.168.1.100", + "app": "USER-SERVICE", # service name + "ipAddr": "192.168.1.100", # IP address + "status": "UP", + "overriddenStatus": "UNKNOWN", + "port": { + "$": 8761, + "@enabled": "true" + }, + "securePort": { + "$": 443, + "@enabled": "false" + }, + "metadata": { + "management.port": "8761", + "weight": 100 # Setting by 'eureka.instance.metadata-map.weight' of the spring boot application + }, + "homePageUrl": "http://192.168.1.100:8761/", + "statusPageUrl": "http://192.168.1.100:8761/actuator/info", + "healthCheckUrl": "http://192.168.1.100:8761/actuator/health", + ... ... + } + ] + } + ] + } +} +``` + +Deal with the Eureka's instance data need the following steps : + +1. select the UP instance. When the value of `overriddenStatus` is "UP" or the value of `overriddenStatus` is "UNKNOWN" and the value of `status` is "UP". +2. Host. The `ipAddr` is the IP address of instance; and must be IPv4 or IPv6. +3. Port. If the value of `port["@enabled"]` is equal to "true", using the value of `port["\$"]`, If the value of `securePort["@enabled"]` is equal to "true", using the value of `securePort["\$"]`. +4. Weight. `local weight = metadata.weight or local_conf.eureka.weight or 100` + +The result of this example is as follows: + +```json +[ + { + "host" : "192.168.1.100", + "port" : 8761, + "weight" : 100, + "metadata" : { + "management.port": "8761", + } + } +] +``` + +## Configuration for discovery client + +### Select discovery client + +Add the following configuration to `conf/config.yaml` and select one discovery client type which you want: + +```yaml +apisix: + discovery: eureka +``` + +This name should be consistent with the file name of the implementation registry in the `apisix/discovery/` directory. + +The supported discovery client: Eureka. 
+
+### Configuration for Eureka
+
+Add the following configuration in `conf/config.yaml`:
+
+```yaml
+eureka:
+  host: # it's possible to define multiple eureka hosts addresses of the same eureka cluster.
+    - "http://${username}:${password}@${eureka_host1}:${eureka_port1}"
+    - "http://${username}:${password}@${eureka_host2}:${eureka_port2}"
+  prefix: "/eureka/"
+  fetch_interval: 30 # 30s
+  weight: 100 # default weight for node
+  timeout:
+    connect: 2000 # 2000ms
+    send: 2000 # 2000ms
+    read: 5000 # 5000ms
+```
+
+
+## Upstream setting
+
+Here is an example of routing a request with a uri of "/user/*" to a service which is named "user-service" in the registry:
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d '
+{
+    "uri": "/user/*",
+    "upstream": {
+        "service_name": "USER-SERVICE",
+        "type": "roundrobin"
+    }
+}'
+
+HTTP/1.1 201 Created
+Date: Sat, 31 Aug 2019 01:17:15 GMT
+Content-Type: text/plain
+Transfer-Encoding: chunked
+Connection: keep-alive
+Server: APISIX web server
+
+{"node":{"value":{"uri":"\/user\/*","upstream": {"service_name": "USER-SERVICE", "type": "roundrobin"}},"createdIndex":61925,"key":"\/apisix\/routes\/1","modifiedIndex":61925},"action":"create"}
+```
+
+Because the upstream interface URLs may conflict, they are usually distinguished in the gateway by a prefix:
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d '
+{
+    "uri": "/a/*",
+    "plugins": {
+        "proxy-rewrite" : {
+            "regex_uri": ["^/a/(.*)", "/${1}"]
+        }
+    },
+    "upstream": {
+        "service_name": "A-SERVICE",
+        "type": "roundrobin"
+    }
+}'
+
+$ curl http://127.0.0.1:9080/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d '
+{
+    "uri": "/b/*",
+    "plugins": {
+        "proxy-rewrite" : {
+            "regex_uri": ["^/b/(.*)", "/${1}"]
+        }
+    },
+    "upstream": {
+        "service_name": "B-SERVICE",
+        "type": "roundrobin"
+    }
+}'
+```
+
+Suppose both A-SERVICE
and B-SERVICE provide a `/test` API. The above configuration allows access to A-SERVICE's `/test` API through `/a/test` and B-SERVICE's `/test` API through `/b/test`. + +**Notice**:When configuring `upstream.service_name`, `upstream.nodes` will no longer take effect, but will be replaced by 'nodes' obtained from the registry. + + diff --git a/doc/getting-started.md b/doc/getting-started.md index a576329f155a5..ae432d4643422 100644 --- a/doc/getting-started.md +++ b/doc/getting-started.md @@ -17,7 +17,7 @@ # --> -[Chinese](getting-started-cn.md) +[Chinese](zh-cn/getting-started.md) # Quick Start Guide diff --git a/doc/grpc-proxy.md b/doc/grpc-proxy.md index 22e3297340f00..50404aef49d88 100644 --- a/doc/grpc-proxy.md +++ b/doc/grpc-proxy.md @@ -17,7 +17,7 @@ # --> -[中文](grpc-proxy-cn.md) +[中文](zh-cn/grpc-proxy.md) # grpc-proxy diff --git a/doc/health-check.md b/doc/health-check.md index b0b062deb175a..1b78831e95700 100644 --- a/doc/health-check.md +++ b/doc/health-check.md @@ -16,7 +16,8 @@ # limitations under the License. # --> -## Health Checks for Upstream + +# Health Checks for Upstream Health Check of APISIX is based on [lua-resty-healthcheck](https://github.com/Kong/lua-resty-healthcheck), you can use it for upstream. @@ -77,26 +78,26 @@ contains: `active` or `passive`. * `active`: To enable active health checks, you need to specify the configuration items under `checks.active` in the Upstream object configuration. - * `active.http_path`: The HTTP GET request path used to detect if the upstream is healthy. - * `active.host`: The HTTP request host used to detect if the upstream is healthy. + * `active.http_path`: The HTTP GET request path used to detect if the upstream is healthy. + * `active.host`: The HTTP request host used to detect if the upstream is healthy. - The threshold fields of `healthy` are: - * `active.healthy.interval`: Interval between health checks for healthy targets (in seconds), the minimum is 1. 
- * `active.healthy.successes`: The number of success times to determine the target is healthy, the minimum is 1. + The threshold fields of `healthy` are: + * `active.healthy.interval`: Interval between health checks for healthy targets (in seconds), the minimum is 1. + * `active.healthy.successes`: The number of success times to determine the target is healthy, the minimum is 1. - The threshold fields of `unhealthy` are: - * `active.unhealthy.interval`: Interval between health checks for unhealthy targets (in seconds), the minimum is 1. - * `active.unhealthy.http_failures`: The number of http failures times to determine the target is unhealthy, the minimum is 1. - * `active.req_headers`: Additional request headers. Array format, so you can fill in multiple headers. + The threshold fields of `unhealthy` are: + * `active.unhealthy.interval`: Interval between health checks for unhealthy targets (in seconds), the minimum is 1. + * `active.unhealthy.http_failures`: The number of http failures times to determine the target is unhealthy, the minimum is 1. + * `active.req_headers`: Additional request headers. Array format, so you can fill in multiple headers. * `passive`: To enable passive health checks, you need to specify the configuration items under `checks.passive` in the Upstream object configuration. - The threshold fields of `healthy` are: - * `passive.healthy.http_statuses`: If the current response code is equal to any of these, set the upstream node to the `healthy` state. Otherwise ignore this request. - * `passive.healthy.successes`: Number of successes in proxied traffic (as defined by `passive.healthy.http_statuses`) to consider a target healthy, as observed by passive health checks. + The threshold fields of `healthy` are: + * `passive.healthy.http_statuses`: If the current response code is equal to any of these, set the upstream node to the `healthy` state. Otherwise ignore this request. 
+ * `passive.healthy.successes`: Number of successes in proxied traffic (as defined by `passive.healthy.http_statuses`) to consider a target healthy, as observed by passive health checks. - The threshold fields of `unhealthy` are: - * `passive.unhealthy.http_statuses`: If the current response code is equal to any of these, set the upstream node to the `unhealthy` state. Otherwise ignore this request. - * `passive.unhealthy.tcp_failures`: Number of TCP failures in proxied traffic to consider a target unhealthy, as observed by passive health checks. - * `passive.unhealthy.timeouts`: Number of timeouts in proxied traffic to consider a target unhealthy, as observed by passive health checks. - * `passive.unhealthy.http_failures`: Number of HTTP failures in proxied traffic (as defined by `passive.unhealthy.http_statuses`) to consider a target unhealthy, as observed by passive health checks. + The threshold fields of `unhealthy` are: + * `passive.unhealthy.http_statuses`: If the current response code is equal to any of these, set the upstream node to the `unhealthy` state. Otherwise ignore this request. + * `passive.unhealthy.tcp_failures`: Number of TCP failures in proxied traffic to consider a target unhealthy, as observed by passive health checks. + * `passive.unhealthy.timeouts`: Number of timeouts in proxied traffic to consider a target unhealthy, as observed by passive health checks. + * `passive.unhealthy.http_failures`: Number of HTTP failures in proxied traffic (as defined by `passive.unhealthy.http_statuses`) to consider a target unhealthy, as observed by passive health checks. 
diff --git a/doc/how-to-build.md b/doc/how-to-build.md index e1b8d8b672ad9..b1a276eba37f9 100644 --- a/doc/how-to-build.md +++ b/doc/how-to-build.md @@ -34,21 +34,21 @@ You can install Apache APISIX in a variety of ways, including source code packag You need to download the Apache source release first: ```shell -wget http://www.apache.org/dist/incubator/apisix/1.2/apache-apisix-1.2-incubating-src.tar.gz -tar zxvf apache-apisix-1.2-incubating-src.tar.gz +wget http://www.apache.org/dist/incubator/apisix/1.4/apache-apisix-1.4-incubating-src.tar.gz +tar zxvf apache-apisix-1.4-incubating-src.tar.gz ``` Install the Lua libraries that the runtime depends on: ```shell -cd apache-apisix-1.2-incubating +cd apache-apisix-1.4-incubating make deps ``` ### Installation via RPM package (CentOS 7) ```shell -sudo yum install -y https://github.com/apache/incubator-apisix/releases/download/1.2/apisix-1.2-0.el7.noarch.rpm +sudo yum install -y https://github.com/apache/incubator-apisix/releases/download/1.4/apisix-1.4-0.el7.noarch.rpm ``` ### Installation via Luarocks (macOS not supported) @@ -64,11 +64,11 @@ sudo sh -c "$(curl -fsSL https://raw.githubusercontent.com/apache/incubator-apis > Install the specified version via Luarocks: ```shell -# Install version 1.2 -sudo luarocks install --lua-dir=/path/openresty/luajit apisix 1.2 +# Install version 1.4 +sudo luarocks install --lua-dir=/path/openresty/luajit apisix 1.4 # old luarocks not support the `lua-dir` parameter, you can remove this option -sudo luarocks install apisix 1.2 +sudo luarocks install apisix 1.4 ``` ## 3. 
Manage (start/stop) APISIX Server diff --git a/doc/https.md b/doc/https.md index 5e7aa0edba627..c2091a72db475 100644 --- a/doc/https.md +++ b/doc/https.md @@ -17,7 +17,7 @@ # --> -[Chinese](https-cn.md) +[Chinese](zh-cn/https.md) ### HTTPS diff --git a/doc/images/apache.png b/doc/images/apache.png new file mode 100644 index 0000000000000..d0075db9e3691 Binary files /dev/null and b/doc/images/apache.png differ diff --git a/doc/images/apisix.png b/doc/images/apisix.png index 153025180aae6..625f9b241cbca 100644 Binary files a/doc/images/apisix.png and b/doc/images/apisix.png differ diff --git a/doc/images/discovery-cn.png b/doc/images/discovery-cn.png new file mode 100644 index 0000000000000..7b448c2ca1e4e Binary files /dev/null and b/doc/images/discovery-cn.png differ diff --git a/doc/images/discovery.png b/doc/images/discovery.png new file mode 100644 index 0000000000000..6b592e3027a5a Binary files /dev/null and b/doc/images/discovery.png differ diff --git a/doc/images/plugin/authz-keycloak.png b/doc/images/plugin/authz-keycloak.png new file mode 100644 index 0000000000000..6b6ae84a89d94 Binary files /dev/null and b/doc/images/plugin/authz-keycloak.png differ diff --git a/doc/images/plugin/skywalking-1.png b/doc/images/plugin/skywalking-1.png new file mode 100644 index 0000000000000..9560c19d9ea19 Binary files /dev/null and b/doc/images/plugin/skywalking-1.png differ diff --git a/doc/images/plugin/skywalking-2.png b/doc/images/plugin/skywalking-2.png new file mode 100644 index 0000000000000..f7d9d4ca0f489 Binary files /dev/null and b/doc/images/plugin/skywalking-2.png differ diff --git a/doc/images/plugin/skywalking-3.png b/doc/images/plugin/skywalking-3.png new file mode 100644 index 0000000000000..691b306113506 Binary files /dev/null and b/doc/images/plugin/skywalking-3.png differ diff --git a/doc/images/plugin/skywalking-4.png b/doc/images/plugin/skywalking-4.png new file mode 100644 index 0000000000000..4a8fb15e9b48a Binary files /dev/null and 
b/doc/images/plugin/skywalking-4.png differ diff --git a/doc/images/plugin/skywalking-5.png b/doc/images/plugin/skywalking-5.png new file mode 100644 index 0000000000000..f24235ec278b7 Binary files /dev/null and b/doc/images/plugin/skywalking-5.png differ diff --git a/doc/index.html b/doc/index.html new file mode 100644 index 0000000000000..61aaf2e0aad34 --- /dev/null +++ b/doc/index.html @@ -0,0 +1,52 @@ + + + + + + + Document + + + + + + + + +
+ + + + + diff --git a/doc/install-dependencies.md b/doc/install-dependencies.md index 989ca9ba36c71..0a5ad26247303 100644 --- a/doc/install-dependencies.md +++ b/doc/install-dependencies.md @@ -20,6 +20,7 @@ # Install Dependencies - [CentOS 6](#centos-6) - [CentOS 7](#centos-7) +- [Fedora 31 & 32](#fedora-31--32) - [Ubuntu 16.04 & 18.04](#ubuntu-1604--1804) - [Debian 9 & 10](#debian-9--10) - [Mac OSX](#mac-osx) @@ -65,6 +66,21 @@ sudo yum install -y etcd openresty curl git gcc luarocks lua-devel sudo service etcd start ``` +Fedora 31 & 32 +============== + +```shell +# add OpenResty source +sudo yum install yum-utils +sudo yum-config-manager --add-repo https://openresty.org/package/fedora/openresty.repo + +# install OpenResty, etcd and some compilation tools +sudo yum install -y etcd openresty curl git gcc luarocks lua-devel + +# start etcd server +sudo etcd --enable-v2=true & +``` + Ubuntu 16.04 & 18.04 ==================== diff --git a/doc/plugin-develop.md b/doc/plugin-develop.md index f6a6cb66bbe4d..a85c208eb4ca1 100644 --- a/doc/plugin-develop.md +++ b/doc/plugin-develop.md @@ -16,7 +16,7 @@ # limitations under the License. # --> -[中文](plugin-develop-cn.md) +[中文](zh-cn/plugin-develop.md) # table of contents @@ -98,6 +98,12 @@ plugins: # plugin list Note : the order of the plugins is not related to the order of execution. +If your plugin has a new code directory of its own, you will need to modify the `Makefile` to create directory, such as: +``` +$(INSTALL) -d $(INST_LUADIR)/apisix/plugins/skywalking +$(INSTALL) apisix/plugins/skywalking/*.lua $(INST_LUADIR)/apisix/plugins/skywalking/ +``` + ## schema and check Write [Json Schema](https://json-schema.org) descriptions and check functions. 
similarly, take the key-auth plugin as an example to see its diff --git a/doc/plugins.md b/doc/plugins.md index 081d2d7385fc3..bf39cf15f0a41 100644 --- a/doc/plugins.md +++ b/doc/plugins.md @@ -17,7 +17,7 @@ # --> -[Chinese](plugins-cn.md) +[Chinese](zh-cn/plugins.md) ## Hot reload diff --git a/doc/plugins/authz-keycloak.md b/doc/plugins/authz-keycloak.md new file mode 100644 index 0000000000000..39a3dcae35a0d --- /dev/null +++ b/doc/plugins/authz-keycloak.md @@ -0,0 +1,135 @@ + + +[中文](../zh-cn/plugins/authz-keycloak-cn.md) + +# Summary +- [**Name**](#name) +- [**Attributes**](#attributes) +- [**How To Enable**](#how-to-enable) +- [**Test Plugin**](#test-plugin) +- [**Disable Plugin**](#disable-plugin) +- [**Examples**](#examples) + + +## Name + +`authz-keycloak` is an authorization plugin to be used with the Keycloak Identity Server. Keycloak is an OAuth/OIDC and +UMA compliant Ideneity Server. Although, its developed to working in conjunction with Keycloak it should work with any +OAuth/OIDC and UMA compliant identity providers as well. + +For more information on Keycloak, refer to [Keycloak Authorization Docs](https://www.keycloak.org/docs/latest/authorization_services) for more information. + +## Attributes + +|Name |Requirement |Description| +|--------- |-------- |-----------| +| token_endpoint|required |A OAuth2-compliant Token Endpoint that supports the `urn:ietf:params:oauth:grant-type:uma-ticket` grant type.| +| grant_type |optional |Default value is `urn:ietf:params:oauth:grant-type:uma-ticket`.| +| audience |optional |The client identifier of the resource server to which the client is seeking access. This parameter is mandatory in case the permission parameter is defined.| +| permissions |optional |This parameter is optional. A string representing a set of one or more resources and scopes the client is seeking access. 
The format of the string must be: `RESOURCE_ID#SCOPE_ID`.| +| timeout |optional |Timeout for the http connection with the Identity Server. Default is 3 seconds| +| policy_enforcement_mode|required |Enforcing or Permissive.| + + +### Policy Enforcement Mode + +Specifies how policies are enforced when processing authorization requests sent to the server. + +**Enforcing** + +- (default mode) Requests are denied by default even when there is no policy associated with a given resource. + +**Permissive** + +- Requests are allowed even when there is no policy associated with a given resource. + + +## How To Enable + +Create a `route` and enable the `authz-keycloak` plugin on the route: + +```shell +curl http://127.0.0.1:9080/apisix/admin/routes/5 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/get", + "plugins": { + "authz-keycloak": { + "token_endpoint": "http://127.0.0.1:8090/auth/realms/{client_id}/protocol/openid-connect/token", + "permissions": ["resource name#scope name"], + "audience": "Client ID" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:8080": 1 + } + } +} +``` + + +## Test Plugin + +```shell +curl http://127.0.0.1:9080/get -H 'Authorization: Bearer {JWT Token}' +``` + + +## Disable Plugin + +Remove the corresponding json configuration in the plugin configuration to disable the `authz-keycloak`. +APISIX plugins are hot-reloaded, therefore no need to restart APISIX. + +```shell +curl http://127.0.0.1:9080/apisix/admin/routes/5 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/get", + "plugins": { + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:8080": 1 + } + } +} +``` + +## Examples + +Checkout the unit test for of the authz-keycloak.t to understand how the authorization policies can be integrated into your +API workflows. Run the following docker image and visit `http://localhost:8090` to view the associated policies for the unit tests. 
+ +```bash +docker run -e KEYCLOAK_USER=admin -e KEYCLOAK_PASSWORD=123456 -p 8090:8080 sshniro/keycloak-apisix +``` + +The following image shows how the policies are configured in the Keycloak server. + +![Keycloak policy design](../images/plugin/authz-keycloak.png) + +## Future Development + +- Currently the `authz-plugin` requires to define the resource name and required scopes in order to enforce policies for the routes. +However, Keycloak's official adapters (Java, JS) also provide path matching by querying Keycloak paths dynamically, and +lazy loading the paths to identity resources. Future versions of the authz-plugin will support this functionality. + +- Support to read scope and configurations from the Keycloak JSON File diff --git a/doc/plugins/basic-auth.md b/doc/plugins/basic-auth.md index c564358727a74..dda4cba69352f 100644 --- a/doc/plugins/basic-auth.md +++ b/doc/plugins/basic-auth.md @@ -17,7 +17,7 @@ # --> -# [Chinese](basic-auth-cn.md) +# [Chinese](../zh-cn/plugins/basic-auth.md) # Summary diff --git a/doc/plugins/batch-requests.md b/doc/plugins/batch-requests.md index 081c5904ddac5..b3ea993e4cadf 100644 --- a/doc/plugins/batch-requests.md +++ b/doc/plugins/batch-requests.md @@ -17,7 +17,7 @@ # --> -# [Chinese](batch-requests-cn.md) +# [Chinese](../zh-cn/plugins/batch-requests.md) # Summary @@ -32,6 +32,10 @@ `batch-requests` can accept mutiple request and send them from `apisix` via [http pipeline](https://en.wikipedia.org/wiki/HTTP_pipelining),and return a aggregated response to client,this can significantly improve performance when the client needs to access multiple APIs. +> **Tips** +> +> The HTTP headers for the outer batch request, except for the Content- headers such as Content-Type, apply to every request in the batch. If you specify a given HTTP header in both the outer request and an individual call, then the individual call header's value overrides the outer batch request header's value. 
The headers for an individual call apply only to that call. + ## Attributes None @@ -41,7 +45,7 @@ None Default enbaled ## Batch Api Request/Response -The plugin will create a api in `apisix` to handle your aggregation request. +The plugin will create a api in `apisix` to handle your batch request. ### Batch Api Request: @@ -67,7 +71,7 @@ Response is `Array` of [HttpResponse](#HttpResponse). #### HttpResponse | ParameterName | Type | Description | -| --- | --- | --- | --- | --- | +| --- | --- | --- | | status | Integer | http status code | | reason | String | http reason phrase | | body | String | http response body | diff --git a/doc/plugins/consumer-restriction.md b/doc/plugins/consumer-restriction.md new file mode 100644 index 0000000000000..60dafa8e8ac3a --- /dev/null +++ b/doc/plugins/consumer-restriction.md @@ -0,0 +1,134 @@ + + +[Chinese](../zh-cn/plugins/consumer-restriction.md) + +# Summary +- [**Name**](#name) +- [**Attributes**](#attributes) +- [**How To Enable**](#how-to-enable) +- [**Test Plugin**](#test-plugin) +- [**Disable Plugin**](#disable-plugin) + + +## Name + +The `consumer-restriction` can restrict access to a Service or a Route by either +whitelisting or blacklisting consumers. Support single or multiple consumers. + +## Attributes + +|Name |Requirement |Description| +|---------|--------|-----------| +|whitelist|optional |List of consumers to whitelist| +|blacklist|optional |List of consumers to blacklist| + +One of `whitelist` or `blacklist` must be specified, and they can not work +together. + +## How To Enable + +Creates a route or service object, and enable plugin `consumer-restriction`. 
+ +```shell +curl http://127.0.0.1:9080/apisix/admin/consumers/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +{ + "username": "jack1", + "plugins": { + "basic-auth": { + "username":"jack2019", + "password": "123456" + } + } +}' + +curl http://127.0.0.1:9080/apisix/admin/consumers/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +{ + "username": "jack2", + "plugins": { + "basic-auth": { + "username":"jack2020", + "password": "123456" + } + } +}' + +curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "basic-auth": {}, + "consumer-restriction": { + "whitelist": [ + "jack1" + ] + } + } +}' +``` + +## Test Plugin + +Requests from jack1: + +```shell +$ curl -u jack2019:123456 http://127.0.0.1:9080/index.html +HTTP/1.1 200 OK +... +``` + +Requests from jack2: + +```shell +$ curl -u jack2020:123456 http://127.0.0.1:9080/index.html -i +HTTP/1.1 403 Forbidden +... +{"message":"You are not allowed"} +``` + + +## Disable Plugin + +When you want to disable the `consumer-restriction` plugin, it is very simple, +you can delete the corresponding json configuration in the plugin configuration, +no need to restart the service, it will take effect immediately: + +```shell +$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "basic-auth": {} + } +}' +``` + +The `consumer-restriction` plugin has been disabled now. It works for other plugins. 
diff --git a/doc/plugins/cors.md b/doc/plugins/cors.md index 6e6114ee215c2..2cf0f55c897ff 100644 --- a/doc/plugins/cors.md +++ b/doc/plugins/cors.md @@ -17,7 +17,7 @@ # --> -# [Chinese](cors-cn.md) +# [Chinese](../zh-cn/plugins/cors.md) # Summary diff --git a/doc/plugins/echo.md b/doc/plugins/echo.md new file mode 100644 index 0000000000000..2e2b4051354ac --- /dev/null +++ b/doc/plugins/echo.md @@ -0,0 +1,95 @@ + + +# Summary +- [**Name**](#name) +- [**Attributes**](#attributes) +- [**How To Enable**](#how-to-enable) +- [**Test Plugin**](#test-plugin) +- [**Disable Plugin**](#disable-plugin) + + +## Name + +`echo` is a a useful plugin to help users understand as fully as possible how to develop an APISIX plugin. + +This plugin addresses the corresponding functionality in the common phases such as init, rewrite, access, balancer +, header filer, body filter and log. + +## Attributes + +|Name |Requirement |Description| +|--------- |-------- |-----------| +| before_body |optional | Body before the filter phase.| +| body |optional | Body to replace upstream response.| +| after_body |optional |Body after the modification of filter phase.| + + +## How To Enable + +The following is an example on how to enable the echo plugin for a specific route. + +```shell +curl http://127.0.0.1:9080/apisix/admin/routes/5 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "plugins": { + "echo": { + "before_body": "before the body modification " + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" +}' +``` + +## Test Plugin + +* success: + +```shell +$ curl -i http://127.0.0.1:9080/hello +HTTP/1.1 200 OK +... +before the body modification hello world +``` + +## Disable Plugin + +Remove the corresponding json configuration in the plugin configuration to disable the `echo`. +APISIX plugins are hot-reloaded, therefore no need to restart APISIX. 
+ +```shell +$ curl http://127.0.0.1:2379/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d value=' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/doc/plugins/fault-injection.md b/doc/plugins/fault-injection.md index cb895ef620928..7a90b8abcfee9 100644 --- a/doc/plugins/fault-injection.md +++ b/doc/plugins/fault-injection.md @@ -17,7 +17,7 @@ # --> -# [Chinese](fault-injection-cn.md) +# [Chinese](../zh-cn/plugins/fault-injection.md) ## Name diff --git a/doc/plugins/grpc-transcoding.md b/doc/plugins/grpc-transcode.md similarity index 98% rename from doc/plugins/grpc-transcoding.md rename to doc/plugins/grpc-transcode.md index 870f9c540ee29..cc9ca4ac3f2af 100644 --- a/doc/plugins/grpc-transcoding.md +++ b/doc/plugins/grpc-transcode.md @@ -17,7 +17,7 @@ # --> -# [Chinese](grpc-transcoding-cn.md) +# [Chinese](../zh-cn/plugins/grpc-transcode.md) ## Name diff --git a/doc/plugins/http-logger.md b/doc/plugins/http-logger.md new file mode 100644 index 0000000000000..b72325dfbe6bf --- /dev/null +++ b/doc/plugins/http-logger.md @@ -0,0 +1,100 @@ + + +# Summary +- [**Name**](#name) +- [**Attributes**](#attributes) +- [**How To Enable**](#how-to-enable) +- [**Test Plugin**](#test-plugin) +- [**Disable Plugin**](#disable-plugin) + + +## Name + +`http-logger` is a plugin which push Log data requests to HTTP/HTTPS servers. + +This will provide the ability to send Log data requests as JSON objects to Monitoring tools and other HTTP servers. 
+ +## Attributes + +|Name |Requirement |Description| +|--------- |-------- |-----------| +|uri |required |URI of the server| +|authorization |optional |Any authorization headers| +|keepalive |optional |Time to keep the connection alive after sending a request| +|name |optional |A unique identifier to identify the logger| +|batch_max_size |optional |Max size of each batch, default is 1000| +|inactive_timeout|optional |maximum age in seconds when the buffer will be flushed if inactive, default is 5s| +|buffer_duration|optional |Maximum age in seconds of the oldest entry in a batch before the batch must be processed, default is 5| +|max_retry_count|optional |Maximum number of retries before removing from the processing pipeline; default is zero| +|retry_delay |optional |Number of seconds the process execution should be delayed if the execution fails; default is 1| + + +## How To Enable + +The following is an example on how to enable the http-logger for a specific route. + +```shell +curl http://127.0.0.1:9080/apisix/admin/routes/5 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "plugins": { + "http-logger": { + "uri": "127.0.0.1:80/postendpoint?param=1" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" +}' +``` + +## Test Plugin + +* success: + +```shell +$ curl -i http://127.0.0.1:9080/hello +HTTP/1.1 200 OK +... +hello, world +``` + +## Disable Plugin + +Remove the corresponding json configuration in the plugin configuration to disable the `http-logger`. +APISIX plugins are hot-reloaded, therefore no need to restart APISIX. 
+ +```shell +$ curl http://127.0.0.1:2379/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d value=' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/doc/plugins/ip-restriction.md b/doc/plugins/ip-restriction.md index ddec4744e1d00..68b50f65d4133 100644 --- a/doc/plugins/ip-restriction.md +++ b/doc/plugins/ip-restriction.md @@ -17,7 +17,7 @@ # --> -[Chinese](ip-restriction-cn.md) +[Chinese](../zh-cn/plugins/ip-restriction.md) # Summary - [**Name**](#name) @@ -141,7 +141,7 @@ you can delete the corresponding json configuration in the plugin configuration, no need to restart the service, it will take effect immediately: ```shell -$ curl http://127.0.0.1:2379/v2/keys/apisix/routes/1 -X PUT -d value=' +$ curl http://127.0.0.1:2379/v2/keys/apisix/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d value=' { "uri": "/index.html", "plugins": {}, diff --git a/doc/plugins/jwt-auth.md b/doc/plugins/jwt-auth.md index ed57175159a7f..f2e8bfa855569 100644 --- a/doc/plugins/jwt-auth.md +++ b/doc/plugins/jwt-auth.md @@ -17,7 +17,7 @@ # --> -[Chinese](jwt-auth-cn.md) +[Chinese](../zh-cn/plugins/jwt-auth.md) # Summary - [**Name**](#name) diff --git a/doc/plugins/kafka-logger-cn.md b/doc/plugins/kafka-logger-cn.md deleted file mode 100644 index 53572f4606885..0000000000000 --- a/doc/plugins/kafka-logger-cn.md +++ /dev/null @@ -1,129 +0,0 @@ - - -# Summary -- [**定义**](#name) -- [**属性列表**](#attributes) -- [**信息**](#info) -- [**如何开启**](#how-to-enable) -- [**测试插件**](#test-plugin) -- [**禁用插件**](#disable-plugin) - -## 定义 - -`kafka-logger` 是一个插件,可用作ngx_lua nginx模块的Kafka客户端驱动程序。 - -这将提供将Log数据请求作为JSON对象发送到外部Kafka集群的功能。 - -## 属性列表 - -|属性名称 |必选项 |描述| -|--------- |--------|-----------| -| broker_list |必要的| 一系列的Kafka经纪人。| -| kafka_topic |必要的| 定位主题以推送数据。| -| timeout |可选的|上游发送数据超时。| -| async |可选的|布尔值,用于控制是否执行异步推送。| -| key |必要的|消息的密钥。| -| 
max_retry |可选的|没有重试次数。| - -## 信息 - -异步与同步数据推送之间的区别。 - -1. 同步模型 - - 如果成功,则返回当前代理和分区的偏移量(** cdata:LL **)。 - 如果发生错误,则返回“ nil”,并带有描述错误的字符串。 - -2. 在异步模型中 - - 消息将首先写入缓冲区。 - 当缓冲区超过`batch_num`时,它将发送到kafka服务器, - 或每个`flush_time`刷新缓冲区。 - - 如果成功,则返回“ true”。 - 如果出现错误,则返回“ nil”,并带有描述错误的字符串(“缓冲区溢出”)。 - -##### 样本经纪人名单 - -此插件支持一次推送到多个经纪人。如以下示例所示,指定外部kafka服务器的代理,以使此功能生效。 - -```json -{ - "127.0.0.1":9092, - "127.0.0.1":9093 -} -``` - -## 如何开启 - -1. 这是有关如何为特定路由启用kafka-logger插件的示例。 - -```shell -curl http://127.0.0.1:9080/apisix/admin/routes/5 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' -{ - "plugins": { - "kafka-logger": { - "broker_list" : - { - "127.0.0.1":9092 - }, - "kafka_topic" : "test2", - "key" : "key1" - } - }, - "upstream": { - "nodes": { - "127.0.0.1:1980": 1 - }, - "type": "roundrobin" - }, - "uri": "/hello" -}' -``` - -## 测试插件 - -* 成功: - -```shell -$ curl -i http://127.0.0.1:9080/hello -HTTP/1.1 200 OK -... -hello, world -``` - -## 禁用插件 - -当您要禁用`kafka-logger`插件时,这很简单,您可以在插件配置中删除相应的json配置,无需重新启动服务,它将立即生效: - -```shell -$ curl http://127.0.0.1:2379/apisix/admin/routes/1 -X PUT -d value=' -{ - "methods": ["GET"], - "uri": "/hello", - "plugins": {}, - "upstream": { - "type": "roundrobin", - "nodes": { - "127.0.0.1:1980": 1 - } - } -}' -``` diff --git a/doc/plugins/kafka-logger.md b/doc/plugins/kafka-logger.md index 83316d397a3f1..809ea5203180b 100644 --- a/doc/plugins/kafka-logger.md +++ b/doc/plugins/kafka-logger.md @@ -32,6 +32,11 @@ This will provide the ability to send Log data requests as JSON objects to external Kafka clusters. +This plugin provides the ability to push Log data as a batch to you're external Kafka topics. In case if you did not recieve the log data don't worry give it some time it will automatically send the logs after the timer function expires in our Batch Processor. + +For more info on Batch-Processor in Apache APISIX please refer. 
+[Batch-Processor](../batch-processor.md) + ## Attributes |Name |Requirement |Description| @@ -39,7 +44,6 @@ This will provide the ability to send Log data requests as JSON objects to exter | broker_list |required | An array of Kafka brokers.| | kafka_topic |required | Target topic to push data.| | timeout |optional |Timeout for the upstream to send data.| -| async |optional |Boolean value to control whether to perform async push.| | key |required |Key for the message.| |name |optional |A unique identifier to identity the batch processor| |batch_max_size |optional |Max size of each batch, default is 1000| @@ -50,21 +54,12 @@ This will provide the ability to send Log data requests as JSON objects to exter ## Info -Difference between async and the sync data push. - -1. In sync model - - In case of success, returns the offset (** cdata: LL **) of the current broker and partition. - In case of errors, returns `nil` with a string describing the error. - -2. In async model - - The `message` will write to the buffer first. - It will send to the kafka server when the buffer exceed the `batch_num`, - or every `flush_time` flush the buffer. +The `message` will write to the buffer first. +It will send to the kafka server when the buffer exceed the `batch_max_size`, +or every `buffer_duration` flush the buffer. - In case of success, returns `true`. - In case of errors, returns `nil` with a string describing the error (`buffer overflow`). +In case of success, returns `true`. +In case of errors, returns `nil` with a string describing the error (`buffer overflow`). ##### Sample broker list @@ -124,7 +119,7 @@ Remove the corresponding json configuration in the plugin configuration to disab APISIX plugins are hot-reloaded, therefore no need to restart APISIX. 
```shell -$ curl http://127.0.0.1:2379/apisix/admin/routes/1 -X PUT -d value=' +$ curl http://127.0.0.1:2379/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d value=' { "methods": ["GET"], "uri": "/hello", diff --git a/doc/plugins/key-auth.md b/doc/plugins/key-auth.md index acb02f77bb96d..2e28f4865876a 100644 --- a/doc/plugins/key-auth.md +++ b/doc/plugins/key-auth.md @@ -17,7 +17,7 @@ # --> -[Chinese](key-auth-cn.md) +[Chinese](../zh-cn/plugins/key-auth.md) # Summary - [**Name**](#name) diff --git a/doc/plugins/limit-conn.md b/doc/plugins/limit-conn.md index db963f777d316..3fe8a26901f38 100644 --- a/doc/plugins/limit-conn.md +++ b/doc/plugins/limit-conn.md @@ -17,7 +17,7 @@ # --> -[Chinese](limit-conn-cn.md) +[Chinese](../zh-cn/plugins/limit-conn.md) # Summary - [**Name**](#name) @@ -28,7 +28,7 @@ ## Name -Limiting request concurrency (or concurrent connections) plugin for Apisix. +Limiting request concurrency plugin. ## Attributes @@ -38,7 +38,9 @@ Limiting request concurrency (or concurrent connections) plugin for Apisix. |burst |required|is the number of excessive concurrent requests (or connections) allowed to be delayed.| |default_conn_delay |required|is the default processing latency of a typical connection (or request).| |key |required|is the user specified key to limit the concurrency level.
For example, one can use the host name (or server zone) as the key so that we limit concurrency per host name. Otherwise, we can also use the client address as the key so that we can avoid a single client from flooding our service with too many parallel connections or requests.
Now accept those as key: "remote_addr"(client's IP), "server_addr"(server's IP), "X-Forwarded-For/X-Real-IP" in request header.| -|rejected_code |required| The HTTP status code returned when the request exceeds the threshold is rejected. The default is 503.| +|rejected_code |required| The HTTP status code returned when the request exceeds `conn` + `burst` will be rejected. The default is 503.| + +**Key can be customized by the user, only need to modify a line of code of the plug-in to complete. It is a security consideration that is not open in the plugin.** ## How To Enable diff --git a/doc/plugins/limit-count.md b/doc/plugins/limit-count.md index 44eca68e84629..4d03a1d060401 100644 --- a/doc/plugins/limit-count.md +++ b/doc/plugins/limit-count.md @@ -17,7 +17,7 @@ # --> -[Chinese](limit-count-cn.md) +[Chinese](../zh-cn/plugins/limit-count.md) # Summary @@ -38,13 +38,16 @@ Limit request rate by a fixed number of requests in a given time window. |count |required|the specified number of requests threshold.| |time_window |required|the time window in seconds before the request count is reset.| |key |required|the user specified key to limit the rate. Here is fully key list: "remote_addr", "server_addr", "http_x_real_ip", "http_x_forwarded_for".| -|rejected_code |optional|The HTTP status code returned when the request exceeds the threshold is rejected. The default is 503.| +|rejected_code |optional|The HTTP status code returned when the request exceeds the threshold is rejected, default 503.| |policy |optional|The rate-limiting policies to use for retrieving and incrementing the limits. 
Available values are `local`(the counters will be stored locally in-memory on the node, default value) and `redis`(counters are stored on a Redis server and will be shared across the nodes, usually used it to do the global speed limit).|
|redis_host |optional|When using the `redis` policy, this property specifies the address of the Redis server.|
|redis_port |optional|When using the `redis` policy, this property specifies the port of the Redis server. The default port is 6379.|
|redis_password|optional|When using the `redis` policy, this property specifies the password of the Redis server.|
|redis_timeout |optional|When using the `redis` policy, this property specifies the timeout in milliseconds of any command submitted to the Redis server. The default timeout is 1000 ms(1 second).|
+
+**The `key` can be customized by the user; it only requires changing a single line of the plugin's code. It is not exposed in the plugin configuration as a security consideration.**
+
## How To Enable

Here's an example, enable the `limit count` plugin on the specified route:
diff --git a/doc/plugins/limit-req.md b/doc/plugins/limit-req.md
index f388756189f17..5a5d7c2c26e76 100644
--- a/doc/plugins/limit-req.md
+++ b/doc/plugins/limit-req.md
@@ -17,7 +17,7 @@
#
-->

-# [Chinese](limit-req-cn.md)
+# [Chinese](../zh-cn/plugins/limit-req.md)

# Summary

@@ -37,8 +37,10 @@ limit request rate using the "leaky bucket" method.
|--------- |--------|-----------|
|rate |required|is the specified request rate (number per second) threshold. Requests exceeding this rate (and below `burst`) will get delayed to conform to the rate.|
|burst |required|is the number of excessive requests per second allowed to be delayed. Requests exceeding this hard limit will get rejected immediately.|
-|rejected_code |required|The HTTP status code returned when the request exceeds the threshold is rejected.
The default is 503.|
| key |required|is the user specified key to limit the rate, now accept those as key: "remote_addr"(client's IP), "server_addr"(server's IP), "X-Forwarded-For/X-Real-IP" in request header.|
+|rejected_code |optional|The HTTP status code returned when a request exceeding the threshold is rejected. The default is 503.|
+
+**The `key` can be customized by the user; it only requires changing a single line of the plugin's code. It is not exposed in the plugin configuration as a security consideration.**

## How To Enable

diff --git a/doc/plugins/mqtt-proxy.md b/doc/plugins/mqtt-proxy.md
index 83ff9f9cc863c..49900b7dfe01f 100644
--- a/doc/plugins/mqtt-proxy.md
+++ b/doc/plugins/mqtt-proxy.md
@@ -17,7 +17,7 @@
#
-->

-[中文](mqtt-proxy-cn.md)
+[中文](../zh-cn/plugins/mqtt-proxy.md)

# Summary
- [**Name**](#name)
diff --git a/doc/plugins/prometheus.md b/doc/plugins/prometheus.md
index 1b57a97cef3f4..3f882f7107a52 100644
--- a/doc/plugins/prometheus.md
+++ b/doc/plugins/prometheus.md
@@ -17,7 +17,7 @@
#
-->

-[Chinese](prometheus-cn.md)
+[Chinese](../zh-cn/plugins/prometheus.md)

# prometheus

This plugin exposes metrics in Prometheus Exposition format.
diff --git a/doc/plugins/proxy-cache.md b/doc/plugins/proxy-cache.md index c8e668c810772..61931a3d0ad89 100644 --- a/doc/plugins/proxy-cache.md +++ b/doc/plugins/proxy-cache.md @@ -17,7 +17,7 @@ # --> -[Chinese](proxy-cache-cn.md) +[Chinese](/doc/zh-cn/plugins/proxy-cache.md) # proxy-cache @@ -50,7 +50,7 @@ Note: 1: enable the proxy-cache plugin for a specific route : ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -X PUT -d ' +curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "proxy-cache": { @@ -130,7 +130,7 @@ Remove the corresponding JSON in the plugin configuration to disable the plugin ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -X PUT -d ' +curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", "plugins": {}, diff --git a/doc/plugins/proxy-mirror.md b/doc/plugins/proxy-mirror.md index 3b6ee542a3449..c209d73d65ab1 100644 --- a/doc/plugins/proxy-mirror.md +++ b/doc/plugins/proxy-mirror.md @@ -17,7 +17,7 @@ # --> -[Chinese](proxy-mirror-cn.md) +[Chinese](../zh-cn/plugins/proxy-mirror.md) # proxy-mirror @@ -39,7 +39,7 @@ The proxy-mirror plugin, which provides the ability to mirror client requests. 
1: enable the proxy-mirror plugin for a specific route : ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -X PUT -d ' +curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "proxy-mirror": { @@ -81,7 +81,7 @@ Remove the corresponding JSON in the plugin configuration to disable the plugin ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -X PUT -d ' +curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", "plugins": {}, diff --git a/doc/plugins/proxy-rewrite.md b/doc/plugins/proxy-rewrite.md index fa73438671f1c..6221c5a9761e3 100644 --- a/doc/plugins/proxy-rewrite.md +++ b/doc/plugins/proxy-rewrite.md @@ -17,7 +17,7 @@ # --> -[Chinese](proxy-rewrite-cn.md) +[Chinese](../zh-cn/plugins/proxy-rewrite.md) # Summary - [**Name**](#name) diff --git a/doc/plugins/redirect.md b/doc/plugins/redirect.md index 187cdc90f0af7..b288c37935d7d 100644 --- a/doc/plugins/redirect.md +++ b/doc/plugins/redirect.md @@ -17,7 +17,7 @@ # --> -[Chinese](redirect-cn.md) +[Chinese](../zh-cn/plugins/redirect.md) # Summary - [**Name**](#name) @@ -34,8 +34,9 @@ URI redirect. |Name |Requirement|Description| |------- |-----|------| -|uri |required| New uri which can contain Nginx variable, eg: `/test/index.html`, `$uri/index.html`. You can refer to variables in a way similar to `${xxx}` to avoid ambiguity, eg: `${uri}foo/index.html`. If you just need the original `$` character, add `\` in front of it, like this one: `/\$foo/index.html`. If you refer to a variable name that does not exist, this will not produce an error, and it will be used as an empty string.| -|ret_code|optional|Response code, the default value is `302`.| +|uri |required, need pick one from `uri` and `http_to_https`| New uri which can contain Nginx variable, eg: `/test/index.html`, `$uri/index.html`. 
You can refer to variables in a way similar to `${xxx}` to avoid ambiguity, eg: `${uri}foo/index.html`. If you just need the original `$` character, add `\` in front of it, like this one: `/\$foo/index.html`. If you refer to a variable name that does not exist, this will not produce an error, and it will be used as an empty string.| +|ret_code|optional, only works with `uri`|Response code, the default value is `302`.| +|http_to_https|required, need pick one from `uri` and `http_to_https`|Boolean value. The default value is `false`. When it is set to `ture` and the request is HTTP, will be automatically redirected to HTTPS with 301 response code, and the URI will keep the same as client request.| ## How To Enable @@ -101,6 +102,19 @@ We can check the response code and the response header `Location`. It shows that the `redirect` plugin is in effect. + Here is an example of redirect HTTP to HTTPS: +```shell +curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/hello", + "plugins": { + "redirect": { + "http_to_https": true + } + } +}' +``` + ## Disable Plugin When you want to disable the `redirect` plugin, it is very simple, diff --git a/doc/plugins/response-rewrite.md b/doc/plugins/response-rewrite.md index b2c26c5bd9619..f1c8576ce3643 100644 --- a/doc/plugins/response-rewrite.md +++ b/doc/plugins/response-rewrite.md @@ -17,9 +17,10 @@ # --> -[Chinese](response-rewrite-cn.md) +[Chinese](../zh-cn/plugins/response-rewrite.md) # Summary + - [**Name**](#name) - [**Attributes**](#attributes) - [**How To Enable**](#how-to-enable) @@ -27,21 +28,25 @@ - [**Disable Plugin**](#disable-plugin) ## Name + response rewrite plugin, rewrite the content from upstream. **senario**: + 1. can set `Access-Control-Allow-*` series field to support CORS(Cross-origin Resource Sharing). 2. 
we can set customized `status_code` and `Location` field in header to achieve redirect, you can alse use [redirect](redirect.md) plugin if you just want a redirection. ## Attributes + |Name |Requirement|Description| |------- |-----|------| -|status_code |optional| New `status code` to client| +|status_code |optional| New `status code` to client, keep the original response code by default.| |body |optional| New `body` to client, and the content-length will be reset too.| |body_base64 |optional| This is a boolean value,identify if `body` in configuration need base64 decoded before rewrite to client.| |headers |optional| Set the new `headers` for client, can set up multiple. If it exists already from upstream, will rewrite the header, otherwise will add the header. You can set the corresponding value to an empty string to remove a header. | ## How To Enable + Here's an example, enable the `response rewrite` plugin on the specified route: ```shell @@ -68,6 +73,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f1 ``` ## Test Plugin + Testing based on the above examples : ```shell @@ -76,6 +82,7 @@ curl -X GET -i http://127.0.0.1:9080/test/index.html It will output like below,no matter what kind of content from upstream. ``` + HTTP/1.1 200 OK Date: Sat, 16 Nov 2019 09:15:12 GMT Transfer-Encoding: chunked @@ -89,9 +96,11 @@ X-Server-status: on This means that the `response rewrite` plugin is in effect. ## Disable Plugin + When you want to disable the `response rewrite` plugin, it is very simple, you can delete the corresponding json configuration in the plugin configuration, no need to restart the service, it will take effect immediately: + ```shell curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { @@ -107,4 +116,3 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f1 ``` The `response rewrite` plugin has been disabled now. It works for other plugins. 
-
diff --git a/doc/plugins/serverless.md b/doc/plugins/serverless.md
index bb6a30fb35f08..d2e370e399c54 100644
--- a/doc/plugins/serverless.md
+++ b/doc/plugins/serverless.md
@@ -17,7 +17,7 @@
#
-->

-[Chinese](serverless-cn.md)
+[Chinese](../zh-cn/plugins/serverless.md)

# Summary
diff --git a/doc/plugins/skywalking.md b/doc/plugins/skywalking.md
new file mode 100644
index 0000000000000..1eefe7c86707e
--- /dev/null
+++ b/doc/plugins/skywalking.md
@@ -0,0 +1,187 @@
+
+
+[Chinese](../zh-cn/plugins/skywalking.md)
+
+# Summary
+- [**Summary**](#Summary)
+  - [**Name**](#Name)
+  - [**Attributes**](#Attributes)
+  - [**How To Enable**](#How-To-Enable)
+  - [**Test Plugin**](#Test-Plugin)
+  - [**Run Skywalking Example**](#Run-Skywalking-Example)
+  - [**Disable Plugin**](#Disable-Plugin)
+  - [**Upstream services(Code With SpringBoot)**](#Upstream-services(Code-With-SpringBoot))
+
+## Name
+
+**Skywalking**(https://github.com/apache/skywalking) is an OpenTracing plugin.\
+The skywalking server supports both http and grpc protocols. The APISIX client only supports the http protocol.
+## Attributes +|Name |Requirement |Description| +|-------|-------|-------| +|endpoint|required| the http endpoint of Skywalking ,for example: http://127.0.0.1:12800| +|sample_ratio|required| the ratio of sample, the minimum is 0.00001, the maximum is 1| +|service_name|optional| service name for skywalking reporter, the default values is **APISIX**| + +## How To Enable + +Here's an example, enable the skywalking plugin on the specified route: + +```shell +curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "methods": ["GET"], + "uris": [ + "/uid/*" + ], + "plugins": { + "skywalking": { + "endpoint": "http://10.110.149.175:12800", + "sample_ratio": 1, + "service_name": "APISIX_SERVER" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "10.110.149.175:8089": 1 + } + } +}' +``` +You can open dashboard with a browser:`http://127.0.0.1:9080/apisix/dashboard/`,to complete the above operation through the web interface, first add a route:\ +![](../images/plugin/skywalking-1.png)\ +Then add skywalking plugin:\ +![](../images/plugin/skywalking-2.png) +## Test Plugin + +### Run-Skywalking-Example + +#### e.g. +1. Run Skywalking Server: + - By default,use H2 storage , start skywalking directly + ``` + sudo docker run --name skywalking -d -p 1234:1234 -p 11800:11800 -p 12800:12800 --restart always apache/skywalking-oap-server + ``` + + - Of Course,you can use elasticsearch storage + 1. Firstly, you should install elasticsearch: + ``` + sudo docker run -d --name elasticsearch -p 9200:9200 -p 9300:9300 --restart always -e "discovery.type=single-node" elasticsearch:6.7.2 + ``` + 2. You can install ElasticSearch management page: elasticsearch-hq(Optional) + ``` + sudo docker run -d --name elastic-hq -p 5000:5000 --restart always elastichq/elasticsearch-hq + ``` + 3. 
Run skywalking server:
+    ```
+    sudo docker run --name skywalking -d -p 1234:1234 -p 11800:11800 -p 12800:12800 --restart always --link elasticsearch:elasticsearch -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server
+    ```
+2. Skywalking WebUI:
+    1. Run SkyWalking webUI Server:
+    ```
+    sudo docker run --name skywalking-ui -d -p 8080:8080 --link skywalking:skywalking -e SW_OAP_ADDRESS=skywalking:12800 --restart always apache/skywalking-ui
+    ```
+    2. Open the webUI of skywalking:
+    You can open the dashboard with a browser: http://10.110.149.175:8080 . If the installation succeeded, it will show a page like the following:
+    ![](../images/plugin/skywalking-3.png)
+
+3. Test:
+    - Access the upstream service through APISIX:
+    ```bash
+    $ curl -v http://10.110.149.192:9080/uid/12
+    HTTP/1.1 200 OK
+    OK
+    ...
+    ```
+    - Open the webUI of skyWalking:
+    ```
+    http://10.110.149.175:8080/
+    ```
+    You can see the topology of all services\
+    ![](../../doc/images/plugin/skywalking-4.png)\
+    You can also see the traces of all services\
+    ![](../../doc/images/plugin/skywalking-5.png)
+
+## Disable Plugin
+
+When you want to disable the skyWalking plugin, it is very simple,
+ you can delete the corresponding json configuration in the plugin configuration,
+ no need to restart the service, it will take effect immediately:
+
+```shell
+$ curl http://127.0.0.1:2379/v2/keys/apisix/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d value='
+{
+    "methods": ["GET"],
+    "uris": [
+        "/uid/*"
+    ],
+    "plugins": {
+    },
+    "upstream": {
+        "type": "roundrobin",
+        "nodes": {
+            "10.110.149.175:8089": 1
+        }
+    }
+}'
+```
+
+The skywalking plugin has been disabled now. It works for other plugins.
+ + +## Upstream services(Code With SpringBoot) + +```java +package com.lenovo.ai.controller; + +import org.springframework.web.bind.annotation.PathVariable; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RestController; +import javax.servlet.http.HttpServletRequest; + +/** + * @author cyxinda + * @create 2020-05-29 14:02 + * @desc skywalking test controller + **/ +@RestController +public class TestController { + @RequestMapping("/uid/{count}") + public String getUidList(@PathVariable("count") String countStr, HttpServletRequest request) { + System.out.println("counter:::::-----"+countStr); + return "OK"; + } +} +``` +Configuring the skywalking agent, when starting the service. +update the file of agent/config/agent.config +``` +agent.service_name=yourservername +collector.backend_service=10.110.149.175:11800 +``` +Run the script: +``` +nohup java -javaagent:/root/skywalking/app/agent/skywalking-agent.jar \ +-jar /root/skywalking/app/app.jar \ +--server.port=8089 \ +2>&1 > /root/skywalking/app/logs/nohup.log & +``` + diff --git a/doc/plugins/syslog.md b/doc/plugins/syslog.md new file mode 100644 index 0000000000000..41a9713798001 --- /dev/null +++ b/doc/plugins/syslog.md @@ -0,0 +1,105 @@ + + +# Summary +- [**Name**](#name) +- [**Attributes**](#attributes) +- [**How To Enable**](#how-to-enable) +- [**Test Plugin**](#test-plugin) +- [**Disable Plugin**](#disable-plugin) + + +## Name + +`sys` is a plugin which push Log data requests to Syslog. + +This will provide the ability to send Log data requests as JSON objects. 
+ +## Attributes + +|Name |Requirement |Description| +|--------- |-------- |-----------| +|host |required | IP address or the Hostname.| +|port |required | Target upstream port.| +|timeout |optional |Timeout for the upstream to send data.| +|tls |optional |Boolean value to control whether to perform SSL verification| +|flush_limit |optional |If the buffered messages' size plus the current message size reaches (>=) this limit (in bytes), the buffered log messages will be written to log server. Default to 4096 (4KB).| +|drop_limit |optional |If the buffered messages' size plus the current message size is larger than this limit (in bytes), the current log message will be dropped because of limited buffer size. Default drop_limit is 1048576 (1MB).| +|sock_type|optional |IP protocol type to use for transport layer. Can be either "tcp" or "udp". Default is "tcp".| +|max_retry_times|optional |Max number of retry times after a connect to a log server failed or send log messages to a log server failed.| +|retry_interval|optional |The time delay (in ms) before retry to connect to a log server or retry to send log messages to a log server, default to 100 (0.1s).| +|pool_size |optional |Keepalive pool size used by sock:keepalive. Default to 10.| + +## How To Enable + +The following is an example on how to enable the sys-logger for a specific route. + +```shell +curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "username": "foo", + "plugins": { + "plugins": { + "syslog": { + "host" : "127.0.0.1", + "port" : 5044, + "flush_limit" : 1 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" + } +}' +``` + +## Test Plugin + +* success: + +```shell +$ curl -i http://127.0.0.1:9080/hello +HTTP/1.1 200 OK +... +hello, world +``` + +## Disable Plugin + +Remove the corresponding json configuration in the plugin configuration to disable the `sys-logger`. 
+APISIX plugins are hot-reloaded, therefore no need to restart APISIX.
+
+```shell
+$ curl http://127.0.0.1:2379/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d value='
+{
+    "methods": ["GET"],
+    "uri": "/hello",
+    "plugins": {},
+    "upstream": {
+        "type": "roundrobin",
+        "nodes": {
+            "127.0.0.1:1980": 1
+        }
+    }
+}'
+```
diff --git a/doc/plugins/tcp-logger.md b/doc/plugins/tcp-logger.md
index 7949444feeee3..1a6d4433bc4ca 100644
--- a/doc/plugins/tcp-logger.md
+++ b/doc/plugins/tcp-logger.md
@@ -18,19 +18,24 @@
-->

# Summary
+
- [**Name**](#name)
- [**Attributes**](#attributes)
- [**How To Enable**](#how-to-enable)
- [**Test Plugin**](#test-plugin)
- [**Disable Plugin**](#disable-plugin)
-
## Name

`tcp-logger` is a plugin which push Log data requests to TCP servers.

This will provide the ability to send Log data requests as JSON objects to Monitoring tools and other TCP servers.

+This plugin provides the ability to push Log data as a batch to your external TCP servers. In case you did not receive the log data, don't worry — give it some time; it will automatically send the logs after the timer function expires in our Batch Processor.
+
+For more info on Batch-Processor in Apache APISIX, please refer to
+[Batch-Processor](../batch-processor.md)
+
## Attributes

|Name |Requirement |Description|
@@ -47,7 +52,6 @@ This will provide the ability to send Log data requests as JSON objects to Monit
|max_retry_count|optional |Maximum number of retries before removing from the processing pipe line; default is zero|
|retry_delay |optional |Number of seconds the process execution should be delayed if the execution fails; default is 1|
-
## How To Enable

The following is an example on how to enable the tcp-logger for a specific route.
@@ -91,7 +95,7 @@ Remove the corresponding json configuration in the plugin configuration to disab
APISIX plugins are hot-reloaded, therefore no need to restart APISIX.
```shell -$ curl http://127.0.0.1:2379/apisix/admin/routes/1 -X PUT -d value=' +$ curl http://127.0.0.1:2379/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d value=' { "methods": ["GET"], "uri": "/hello", diff --git a/doc/plugins/udp-logger.md b/doc/plugins/udp-logger.md index b7a36d2e2e0e8..2d6d0f0986986 100644 --- a/doc/plugins/udp-logger.md +++ b/doc/plugins/udp-logger.md @@ -18,19 +18,24 @@ --> # Summary + - [**Name**](#name) - [**Attributes**](#attributes) - [**How To Enable**](#how-to-enable) - [**Test Plugin**](#test-plugin) - [**Disable Plugin**](#disable-plugin) - ## Name `udp-logger` is a plugin which push Log data requests to UDP servers. This will provide the ability to send Log data requests as JSON objects to Monitoring tools and other UDP servers. +This plugin provides the ability to push Log data as a batch to you're external UDP servers. In case if you did not recieve the log data don't worry give it some time it will automatically send the logs after the timer function expires in our Batch Processor. + +For more info on Batch-Processor in Apache APISIX please refer. +[Batch-Processor](../batch-processor.md) + ## Attributes |Name |Requirement |Description| @@ -85,7 +90,7 @@ Remove the corresponding json configuration in the plugin configuration to disab APISIX plugins are hot-reloaded, therefore no need to restart APISIX. 
```shell -$ curl http://127.0.0.1:2379/apisix/admin/routes/1 -X PUT -d value=' +$ curl http://127.0.0.1:2379/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d value=' { "methods": ["GET"], "uri": "/hello", diff --git a/doc/plugins/uri-blocker.md b/doc/plugins/uri-blocker.md new file mode 100644 index 0000000000000..270f3039c2d0d --- /dev/null +++ b/doc/plugins/uri-blocker.md @@ -0,0 +1,96 @@ + + +[Chinese](uri-blocker.md) + +# Summary + +- [**Name**](#name) +- [**Attributes**](#attributes) +- [**How To Enable**](#how-to-enable) +- [**Test Plugin**](#test-plugin) +- [**Disable Plugin**](#disable-plugin) + +## Name + +The plugin helps we intercept user requests, we only need to indicate the `block_rules`. + +## Attributes + +|Name |Requirement |Description| +|--------- |--------|-----------| +|block_rules |required|Regular filter rule array. Each of these items is a regular rule. If the current request URI hits any one of them, set the response code to rejected_code to exit the current user request. Example: `["root.exe", "root.m+"]`.| +|rejected_code |optional|The HTTP status code returned when the request URI hit any of `filter_rule`, default `403`.| + +## How To Enable + +Here's an example, enable the `uri blocker` plugin on the specified route: + +```shell +curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/*", + "plugins": { + "uri-blocker": { + "block_rules": ["root.exe", "root.m+"] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +## Test Plugin + +```shell +$ curl -i http://127.0.0.1:9080/root.exe?a=a +HTTP/1.1 403 Forbidden +Date: Wed, 17 Jun 2020 13:55:41 GMT +Content-Type: text/html; charset=utf-8 +Content-Length: 150 +Connection: keep-alive +Server: APISIX web server + +... ... 
+``` + +## Disable Plugin + +When you want to disable the `uri blocker` plugin, it is very simple, + you can delete the corresponding json configuration in the plugin configuration, + no need to restart the service, it will take effect immediately: + +```shell +curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/*", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +The `uri blocker` plugin has been disabled now. It works for other plugins. diff --git a/doc/plugins/wolf-rbac.md b/doc/plugins/wolf-rbac.md index 6e94ee68486fa..c8a5de3a605b2 100644 --- a/doc/plugins/wolf-rbac.md +++ b/doc/plugins/wolf-rbac.md @@ -17,7 +17,7 @@ # --> -[中文](wolf-rbac-cn.md) +[中文](../zh-cn/plugins/wolf-rbac.md) # Summary diff --git a/doc/plugins/zipkin.md b/doc/plugins/zipkin.md index b4bf4c0380b65..ec8008707fe20 100644 --- a/doc/plugins/zipkin.md +++ b/doc/plugins/zipkin.md @@ -17,7 +17,7 @@ # --> -[Chinese](zipkin-cn.md) +[Chinese](../zh-cn/plugins/zipkin.md) # Summary - [**Name**](#name) @@ -111,7 +111,7 @@ When you want to disable the zipkin plugin, it is very simple, no need to restart the service, it will take effect immediately: ```shell -$ curl http://127.0.0.1:2379/v2/keys/apisix/routes/1 -X PUT -d value=' +$ curl http://127.0.0.1:2379/v2/keys/apisix/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d value=' { "methods": ["GET"], "uri": "/index.html", diff --git a/doc/stand-alone.md b/doc/stand-alone.md index 2093ae4744e70..b88d81aa3ee50 100644 --- a/doc/stand-alone.md +++ b/doc/stand-alone.md @@ -17,7 +17,7 @@ # --> -[Chinese](stand-alone-cn.md) +[Chinese](zh-cn/stand-alone.md) ## Stand-alone mode diff --git a/doc/stream-proxy.md b/doc/stream-proxy.md index 7956be7496c24..606b92786b963 100644 --- a/doc/stream-proxy.md +++ b/doc/stream-proxy.md @@ -17,7 +17,7 @@ # --> -[Chinese](stream-proxy-cn.md) +[Chinese](zh-cn/stream-proxy.md) # Stream Proxy 
diff --git a/doc/zh-cn/README.md b/doc/zh-cn/README.md new file mode 100644 index 0000000000000..88966ee4f177d --- /dev/null +++ b/doc/zh-cn/README.md @@ -0,0 +1,83 @@ + +[English](../README.md) + +参考文档 +================== + +* [APISIX 说明](../../README_CN.md) +* [架构设计](architecture-design.md) +* [压力测试](benchmark.md) +* [如何构建 Apache APISIX](how-to-build.md) +* [健康检查](health-check.md): 支持对上游节点的主动和被动健康检查,在负载均衡时自动过滤掉不健康的节点。 +* [路由 radixtree](../router-radixtree.md) +* [独立运行模型](stand-alone.md): 支持从本地 yaml 格式的配置文件启动,更适合 Kubernetes(k8s) 体系。 +* [TCP/UDP 动态代理](stream-proxy.md) +* [管理 API](admin-api.md) +* [变更日志](../../CHANGELOG_CN.md) +* [代码风格](../CODE_STYLE.md) +* [常见问答](../../FAQ_CN.md) + +插件 +=== + +* [插件热加载](plugins.md):无需重启服务,完成插件热加载或卸载。 +* [HTTPS](https.md):根据 TLS 扩展字段 SNI(Server Name Indication) 动态加载证书。 +* [动态负载均衡](architecture-design.md#upstream):跨多个上游服务的动态负载均衡,目前已支持 round-robin 和一致性哈希算法。 +* [key-auth](plugins/key-auth.md):基于 Key Authentication 的用户认证。 +* [JWT-auth](plugins/jwt-auth.md):基于 [JWT](https://jwt.io/) (JSON Web Tokens) Authentication 的用户认证。 +* [basic-auth](plugins/basic-auth.md):基于 basic auth 的用户认证。 +* [wolf-rbac](plugins/wolf-rbac.md) 基于 *RBAC* 的用户认证及授权。 +* [limit-count](plugins/limit-count.md):基于“固定窗口”的限速实现。 +* [limit-req](plugins/limit-req.md):基于漏桶原理的请求限速实现。 +* [limit-conn](plugins/limit-conn.md):限制并发请求(或并发连接)。 +* [proxy-rewrite](plugins/proxy-rewrite.md): 支持自定义修改 proxy 到上游的信息。 +* [prometheus](plugins/prometheus.md):以 Prometheus 格式导出 APISIX 自身的状态信息,方便被外部 Prometheus 服务抓取。 +* [OpenTracing](plugins/zipkin.md):支持 Zikpin 和 Apache SkyWalking。 +* [grpc-transcode](plugins/grpc-transcode.md):REST <--> gRPC 转码。 +* [serverless](plugins/serverless.md):允许在 APISIX 中的不同阶段动态运行 Lua 代码。 +* [ip-restriction](plugins/ip-restriction.md): IP 黑白名单。 +* [openid-connect](plugins/oauth.md) +* [redirect](plugins/redirect.md): URI 重定向。 +* [response-rewrite](plugins/response-rewrite.md): 支持自定义修改返回内容的 `status code`、`body`、`headers`。 +* 
[fault-injection](plugins/fault-injection.md):故障注入,可以返回指定的响应体、响应码和响应时间,从而提供了不同的失败场景下处理的能力,例如服务失败、服务过载、服务高延时等。 +* [proxy-cache](plugins/proxy-cache.md):代理缓存插件提供缓存后端响应数据的能力。 +* [proxy-mirror](plugins/proxy-mirror.md):代理镜像插件提供镜像客户端请求的能力。 +* [udp-logger](plugins/udp-logger.md): 将请求记录到 UDP 服务器 +* [tcp-logger](plugins/tcp-logger.md): 将请求记录到 TCP 服务器 +* [kafka-logger](plugins/kafka-logger.md): 将请求记录到外部 Kafka 服务器。 +* [cors](plugins/cors.md): 为你的API启用 CORS +* [batch-requests](plugins/batch-requests.md): 以 **http pipeline** 的方式在网关一次性发起多个 `http` 请求。 +* [authz-keycloak](plugins/authz-keycloak-cn.md): 支持 Keycloak 身份认证服务器 +* [uri-blocker](plugins/uri-blocker.md): 根据 URI 拦截用户请求。 +* [oauth](plugins/oauth.md): 提供 OAuth 2 身份验证和自省。 + +部署 +======= + +### AWS + +推荐的方法是在 [AWS Fargate](https://aws.amazon.com/fargate/) 上使用 [AWS CDK](https://aws.amazon.com/cdk/) 部署 APISIX,这有助于将 APISIX 层和上游层分离到具有自动缩放功能的完全托管和安全的无服务器容器计算环境之上。 + +请参阅 [Pahud Hsieh](https://github.com/pahud) 撰写的[指南](https://github.com/pahud/cdk-samples/blob/master/typescript/apisix/README.md),了解如何在 AWS CDK 中 100% 配置推荐的架构。 + +### Kubernetes + +请参阅[指南](../../kubernetes/README.md)并了解如何在 Kubernetes 中部署 APISIX。 + diff --git a/doc/zh-cn/_sidebar.md b/doc/zh-cn/_sidebar.md new file mode 100644 index 0000000000000..95504b63e965f --- /dev/null +++ b/doc/zh-cn/_sidebar.md @@ -0,0 +1,103 @@ + + +- Getting started + + - [Introduction](./zh-cn/README.md) + - [Quick start](./zh-cn/getting-started.md) + +- General + + - [Architecture](./zh-cn/architecture-design.md) + + - [Benchmark](./zh-cn/benchmark.md) + + - Installation + + - [How to build](./zh-cn/how-to-build.md) + - [Install Dependencies](./zh-cn/install-dependencies.md) + + - [HTTPS](./zh-cn/https.md) + + - [Router](./zh-cn/router-radixtree.md) + + - Plugins + + - [Develop Plugins](./zh-cn/plugin-develop.md) + - [Hot Reload](./zh-cn/plugins.md) + + - Proxy Modes + + - [GRPC Proxy](./zh-cn/grpc-proxy.md) + - [Stream Proxy](./zh-cn/stream-proxy.md) + +- Plugins + + - Authentication + + - 
[Key Auth](./zh-cn/plugins/key-auth.md) + - [Basic Auth](./zh-cn/plugins/basic-auth.md) + - [JWT Auth](./zh-cn/plugins/jwt-auth.md) + - [Opend ID Connect](./zh-cn/plugins/oauth.md) + + - General + + - [Redirect](./zh-cn/plugins/redirect.md) + - [Serverless](./zh-cn/plugins/serverless.md) + - [Batch Request](./zh-cn/plugins/batch-requests.md) + - [Fault Injection](./zh-cn/plugins/fault-injection.md) + - [MQTT Proxy](./zh-cn/plugins/mqtt-proxy.md) + - [Proxy Cache](./zh-cn/plugins/proxy-cache.md) + - [Proxy Mirror](./zh-cn/plugins/proxy-mirror.md) + - [Echo](./zh-cn/plugins/echo.md) + + - Transformations + + - [Response Rewrite](./zh-cn/plugins/response-rewrite.md) + - [Proxy Rewrite](./zh-cn/plugins/proxy-rewrite.md) + - [GRPC Transcoding](./zh-cn/plugins/grpc-transcode.md) + + - Security + + - [Consumer Restriction](./zh-cn/plugins/consumer-restriction.md) + - [Limit Connection](./zh-cn/plugins/limit-conn.md) + - [Limit Count](./zh-cn/plugins/limit-count.md) + - [Limit Request](./zh-cn/plugins/limit-req.md) + - [CORS](./zh-cn/plugins/cors.md) + - [IP Restriction](./zh-cn/plugins/ip-restriction.md) + - [Keycloak Authorization](./zh-cn/plugins/authz-keycloak.md) + - [RBAC Wolf](./zh-cn/plugins/wolf-rbac.md) + + - Monitoring + + - [Prometheus](./zh-cn/plugins/prometheus.md) + - [SKywalking](./zh-cn/plugins/skywalking.md) + - [Zipkin](./zh-cn/plugins/zipkin.md) + + - Loggers + + - [HTTP Logger](./zh-cn/plugins/http-logger.md) + - [Kafka Logger](./zh-cn/plugins/kafka-logger.md) + - [Syslog](./zh-cn/plugins/syslog.md) + - [TCP Logger](./zh-cn/plugins/tcp-logger.md) + - [UDP Logger](./zh-cn/plugins/udp-logger.md) + +- Admin API + + - [Admin API](./zh-cn/admin-api.md) diff --git a/doc/admin-api-cn.md b/doc/zh-cn/admin-api.md similarity index 86% rename from doc/admin-api-cn.md rename to doc/zh-cn/admin-api.md index 7a7d3555b27cc..16256611d791e 100644 --- a/doc/admin-api-cn.md +++ b/doc/zh-cn/admin-api.md @@ -40,7 +40,7 @@ |PUT |/apisix/admin/routes/{id}|{...}|根据 id 创建资源| 
|POST |/apisix/admin/routes |{...}|创建资源,id 由后台服务自动生成| |DELETE |/apisix/admin/routes/{id}|无|删除资源| -|PATCH |/apisix/admin/routes/{id}/{path}|{...}|修改已有 Route 的部分内容,其他不涉及部分会原样保留。| +|PATCH |/apisix/admin/routes/{id}|{...}|修改已有 Route 的部分内容,其他不涉及部分会原样保留;如果你要删除某个属性,将该属性的值设置为null 即可删除| > uri 请求参数: @@ -52,21 +52,22 @@ |名字 |可选项 |类型 |说明 |示例| |---------|---------|----|-----------|----| -|uri |与 `uris` 二选一 |匹配规则|除了如 `/foo/bar`、`/foo/gloo` 这种全量匹配外,使用不同 [Router](architecture-design-cn.md#router) 还允许更高级匹配,更多见 [Router](architecture-design-cn.md#router)。|"/hello"| +|uri |与 `uris` 二选一 |匹配规则|除了如 `/foo/bar`、`/foo/gloo` 这种全量匹配外,使用不同 [Router](architecture-design.md#router) 还允许更高级匹配,更多见 [Router](architecture-design.md#router)。|"/hello"| |uris |与 `uri` 二选一 |匹配规则|数组形式,可以匹配多个 `uri`|["/hello", "/world"]| -|plugins |`plugins`、`upstream`/`upstream_id`、`service_id`至少选择一个 |Plugin|详见 [Plugin](architecture-design-cn.md#plugin) || -|upstream |`plugins`、`upstream`/`upstream_id`、`service_id`至少选择一个 |Upstream|启用的 Upstream 配置,详见 [Upstream](architecture-design-cn.md#upstream)|| -|upstream_id|`plugins`、`upstream`/`upstream_id`、`service_id`至少选择一个 |Upstream|启用的 upstream id,详见 [Upstream](architecture-design-cn.md#upstream)|| -|service_id|`plugins`、`upstream`/`upstream_id`、`service_id`至少选择一个 |Service|绑定的 Service 配置,详见 [Service](architecture-design-cn.md#service)|| +|plugins |`plugins`、`upstream`/`upstream_id`、`service_id`至少选择一个 |Plugin|详见 [Plugin](architecture-design.md#plugin) || +|upstream |`plugins`、`upstream`/`upstream_id`、`service_id`至少选择一个 |Upstream|启用的 Upstream 配置,详见 [Upstream](architecture-design.md#upstream)|| +|upstream_id|`plugins`、`upstream`/`upstream_id`、`service_id`至少选择一个 |Upstream|启用的 upstream id,详见 [Upstream](architecture-design.md#upstream)|| +|service_id|`plugins`、`upstream`/`upstream_id`、`service_id`至少选择一个 |Service|绑定的 Service 配置,详见 [Service](architecture-design.md#service)|| |service_protocol|可选|上游协议类型|只可以是 "grpc", "http" 二选一。|默认 "http",使用gRPC proxy 或gRPC transcode 时,必须用"grpc"| -|desc |可选 
+|vars |可选 |匹配规则|由一个或多个`{var, operator, val}`元素组成的列表,类似这样:`{{var, operator, val}, {var, operator, val}, ...}`。例如:`{"arg_name", "==", "json"}`,表示当前请求参数 `name` 是 `json`。这里的 `var` 与 Nginx 内部自身变量命名是保持一致,所以也可以使用 `request_uri`、`host` 等;对于 `operator` 部分,目前已支持的运算符有 `==`、`~=`、`>`、`<` 和 `~~`。对于`>`和`<`两个运算符,会把结果先转换成 number 然后再做比较。查看支持的[运算符列表](#运算符列表)|{{"arg_name", "==", "json"}, {"arg_age", ">", 18}}|
"remote_addr": "127.0.0.1", # 客户端请求 IP 地址 "remote_addrs": ["127.0.0.1"], # 一组客户端请求 IP 地址, remote_addr 与 remote_addrs 只需要有一个非空即可 @@ -187,16 +189,17 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 |PUT |/apisix/admin/services/{id}|{...}|根据 id 创建资源| |POST |/apisix/admin/services |{...}|创建资源,id 由后台服务自动生成| |DELETE |/apisix/admin/services/{id}|无|删除资源| -|PATCH |/apisix/admin/services/{id}/{path}|{...}|修改已有 Service 的部分内容,其他不涉及部分会原样保留。| +|PATCH |/apisix/admin/services/{id}|{...}|修改已有 Service 的部分内容,其他不涉及部分会原样保留;如果你要删除某个属性,将该属性的值设置为null 即可删除| > body 请求参数: |名字 |可选项 |类型 |说明 |示例| |---------|---------|----|-----------|----| -|plugins |可选 |Plugin|详见 [Plugin](architecture-design-cn.md#plugin) || -|upstream | upstream 或 upstream_id 两个选一个 |Upstream|启用的 Upstream 配置,详见 [Upstream](architecture-design-cn.md#upstream)|| -|upstream_id| upstream 或 upstream_id 两个选一个 |Upstream|启用的 upstream id,详见 [Upstream](architecture-design-cn.md#upstream)|| -|desc |可选 |辅助 |标识服务名称、使用场景等。|| +|plugins |可选 |Plugin|详见 [Plugin](architecture-design.md#plugin) || +|upstream | upstream 或 upstream_id 两个选一个 |Upstream|启用的 Upstream 配置,详见 [Upstream](architecture-design.md#upstream)|| +|upstream_id| upstream 或 upstream_id 两个选一个 |Upstream|启用的 upstream id,详见 [Upstream](architecture-design.md#upstream)|| +|name |可选 |辅助 |标识服务名称。|| +|desc |可选 |辅助 |服务描述、使用场景等。|| serivce 对象 json 配置内容: @@ -206,6 +209,7 @@ serivce 对象 json 配置内容: "plugins": {}, # 指定 service 绑定的插件 "upstream_id": "1", # upstream 对象在 etcd 中的 id ,建议使用此值 "upstream": {}, # upstream 信息对象,不建议使用 + "name": "测试svc", # service 名称 "desc": "hello world", # service 描述 } ``` @@ -214,7 +218,7 @@ serivce 对象 json 配置内容: ```shell # 创建一个Service -$ curl http://127.0.0.1:9080/apisix/admin/services/201 -X PUT -i -d ' +$ curl http://127.0.0.1:9080/apisix/admin/services/201 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "plugins": { "limit-count": { @@ -294,7 +298,7 @@ consumer 对象 json 配置内容: ```shell # 创建 Consumer ,指定认证插件 key-auth 
+|PATCH  |/apisix/admin/upstreams/{id}|{...}|修改已有 Upstream 的部分内容,其他不涉及部分会原样保留;如果你要删除某个属性,将该属性的值设置为 null 即可删除|
"desc": "hello world", # upstream 描述 } ``` @@ -387,15 +393,15 @@ upstream 对象 json 配置内容: ```shell # 创建一个upstream -$ curl http://127.0.0.1:9080/apisix/admin/upstreams/100 -i -X PUT -d ' -> { -> "type": "roundrobin", -> "nodes": { -> "127.0.0.1:80": 1, -> "127.0.0.2:80": 2, -> "foo.com:80": 3 -> } -> }' +$ curl http://127.0.0.1:9080/apisix/admin/upstreams/100 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -i -X PUT -d ' +{ + "type":"roundrobin", + "nodes":{ + "127.0.0.1:80":1, + "127.0.0.2:80":2, + "foo.com:80":3 + } +}' HTTP/1.1 201 Created Date: Thu, 26 Dec 2019 04:19:34 GMT Content-Type: text/plain diff --git a/doc/architecture-design-cn.md b/doc/zh-cn/architecture-design.md similarity index 89% rename from doc/architecture-design-cn.md rename to doc/zh-cn/architecture-design.md index 3fa688448fe33..a9f66846546ce 100644 --- a/doc/architecture-design-cn.md +++ b/doc/zh-cn/architecture-design.md @@ -34,7 +34,7 @@ ### 插件加载流程 -![插件加载流程](./images/flow-load-plugin.png) +![插件加载流程](../images/flow-load-plugin.png) ### 插件内部结构 @@ -105,7 +105,7 @@ Server: APISIX web server 当我们接收到成功应答,表示该 Route 已成功创建。 -有关 Route 的具体选项,可具体查阅 [Admin API 之 Route](admin-api-cn.md#route)。 +有关 Route 的具体选项,可具体查阅 [Admin API 之 Route](admin-api.md#route)。 [返回目录](#目录) @@ -193,7 +193,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/102 -H 'X-API-KEY: edd1c9f034335f 优先级更高。 一个插件在一次请求中只会执行一次,即使被同时绑定到多个不同对象中(比如 Route 或 Service)。 -插件运行先后顺序是根据插件自身的优先级来决定的,例如:[example-plugin](../apisix/plugins/example-plugin.lua#L37)。 +插件运行先后顺序是根据插件自身的优先级来决定的,例如:[example-plugin](../../apisix/plugins/example-plugin.lua#L37)。 插件配置作为 Route 或 Service 的一部分提交的,放到 `plugins` 下。它内部是使用插件 名字作为哈希的 key 来保存不同插件的配置项。 @@ -216,7 +216,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/102 -H 'X-API-KEY: edd1c9f034335f 并不是所有插件都有具体配置项,比如 `prometheus` 下是没有任何具体配置项,这时候用一个空的对象 标识即可。 -[查看 APISIX 已支持插件列表](plugins-cn.md) +[查看 APISIX 已支持插件列表](plugins.md) [返回目录](#目录) @@ -238,11 +238,12 @@ APISIX 的 Upstream 除了基本的复杂均衡算法选择外,还支持对上 |名字 |可选|说明| |------- 
|-----|------| |type |必填|`roundrobin` 支持权重的负载,`chash` 一致性哈希,两者是二选一的| -|nodes |与 `k8s_deployment_info` 二选一|哈希表,内部元素的 key 是上游机器地址列表,格式为`地址 + Port`,其中地址部分可以是 IP 也可以是域名,比如 `192.168.1.100:80`、`foo.com:80` 等。value 则是节点的权重。当权重值为 `0` 代表该上游节点失效,不会被选中,可以用于暂时摘除节点的情况。| -|k8s_deployment_info|与 `nodes` 二选一|哈希表|字段包括 `namespace`、`deploy_name`、`service_name`、`port`、`backend_type`,其中 `port` 字段为数值,`backend_type` 为 `pod` 或 `service`,其他为字符串 | +|nodes |与 `k8s_deployment_info`、 `service_name` 三选一|哈希表,内部元素的 key 是上游机器地址列表,格式为`地址 + Port`,其中地址部分可以是 IP 也可以是域名,比如 `192.168.1.100:80`、`foo.com:80` 等。value 则是节点的权重。当权重值为 `0` 代表该上游节点失效,不会被选中,可以用于暂时摘除节点的情况。| +|service_name |与 `nodes`、 `k8s_deployment_info` 三选一 |用于设置上游服务名,并配合注册中心使用,详细可参考[集成服务发现注册中心](discovery.md) | +|k8s_deployment_info|与 `nodes`、 `service_name` 三选一|哈希表|字段包括 `namespace`、`deploy_name`、`service_name`、`port`、`backend_type`,其中 `port` 字段为数值,`backend_type` 为 `pod` 或 `service`,其他为字符串 | |key |可选|在 `type` 等于 `chash` 是必选项。 `key` 需要配合 `hash_on` 来使用,通过 `hash_on` 和 `key` 来查找对应的 node `id`| |hash_on |可选|`hash_on` 支持的类型有 `vars`(Nginx内置变量),`header`(自定义header),`cookie`,`consumer`,默认值为 `vars`| -|checks |可选|配置健康检查的参数,详细可参考[health-check](health-check.md)| +|checks |可选|配置健康检查的参数,详细可参考[health-check](../health-check.md)| |retries |可选|使用底层的 Nginx 重试机制将请求传递给下一个上游,默认 APISIX 会启用重试机制,根据配置的后端节点个数设置重试次数,如果此参数显式被设置将会覆盖系统默认设置的重试次数。| |enable_websocket|可选| 是否启用 `websocket`(布尔值),默认不启用| @@ -352,7 +353,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 }' ``` -更多细节可以参考[健康检查的文档](health-check.md)。 +更多细节可以参考[健康检查的文档](../health-check.md)。 下面是几个使用不同`hash_on`类型的配置示例: @@ -361,7 +362,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 创建一个consumer对象: ```shell -curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ` +curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "username": "jack", "plugins": 
+结合 [consumer-restriction](plugins/consumer-restriction.md) 插件,限制 jack 对该 route 的访问
+
+```shell
+# 设置黑名单,禁止 jack 访问该 API
+## 配置
+
+创建批处理器时唯一必需的参数是一个函数。当批次达到最大大小,或缓冲区超过最长持续时间时,将执行该函数。
|------| +|id |可选的 |标识批处理者的唯一标识符| +|batch_max_size |可选的 |每批的最大大小,默认为1000| +|inactive_timeout|可选的 |如果不活动,将刷新缓冲区的最大时间(以秒为单位),默认值为5s| +|buffer_duration|可选的 |必须先处理批次中最旧条目的最大期限(以秒为单位),默认是5| +|max_retry_count|可选的 |从处理管道中移除之前的最大重试次数;默认为零| +|retry_delay |可选的 |如果执行失败,应该延迟进程执行的秒数;默认为1| + +以下代码显示了如何使用批处理程序的示例。批处理处理器将要执行的功能作为第一个参数,将批处理配置作为第二个参数。 + +```lua +local bp = require("apisix.plugins.batch-processor") +local func_to_execute = function(entries) + -- serialize to json array core.json.encode(entries) + -- process/send data + return true + end + +local config = { + max_retry_count = 2, + buffer_duration = 60, + inactive_timeout = 5, + batch_max_size = 1, + retry_delay = 0 +} + + +local batch_processor, err = bp:new(func_to_execute, config) + +if batch_processor then + batch_processor:push({hello='world'}) +end +``` + + +注意:请确保批处理的最大大小(条目数)在函数执行的范围内。 +刷新批处理的计时器基于“ inactive_timeout”配置运行。因此,为了获得最佳使用效果, +保持“ inactive_timeout”小于“ buffer_duration”。 diff --git a/doc/benchmark-cn.md b/doc/zh-cn/benchmark.md similarity index 96% rename from doc/benchmark-cn.md rename to doc/zh-cn/benchmark.md index b42cf90f4763f..5e1dd4c80b653 100644 --- a/doc/benchmark-cn.md +++ b/doc/zh-cn/benchmark.md @@ -17,7 +17,7 @@ # --> -[English](benchmark.md) +[English](../benchmark.md) ### 测试环境 @@ -66,4 +66,4 @@ #### 火焰图 火焰图的采样结果: -![火焰图采样结果](../doc/images/flamegraph-2.jpg) +![火焰图采样结果](../images/flamegraph-2.jpg) diff --git a/doc/zh-cn/discovery.md b/doc/zh-cn/discovery.md new file mode 100644 index 0000000000000..003cc287be8d7 --- /dev/null +++ b/doc/zh-cn/discovery.md @@ -0,0 +1,253 @@ + +[English](../discovery.md) + +# 集成服务发现注册中心 + +* [**摘要**](#摘要) +* [**如何扩展注册中心**](#如何扩展注册中心) + * [**基本步骤**](#基本步骤) + * [**以 Eureka 举例**](#以-Eureka-举例) + * [**实现 eureka.lua**](#实现-eurekalua) + * [**Eureka 与 APISIX 之间数据转换逻辑**](#Eureka-与-APISIX-之间数据转换逻辑) +* [**注册中心配置**](#注册中心配置) + * [**选择注册中心**](#选择注册中心) + * [**Eureka 的配置**](#Eureka-的配置) +* [**upstream 配置**](#upstream-配置) + +## 摘要 + 
+                            "@enabled": "true"  # 该端口是否启用
+    - "http://${username}:${password}@${eureka_host1}:${eureka_port1}"
+    - "http://${username}:${password}@${eureka_host2}:${eureka_port2}"
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d '
+{
+    "uri": "/a/*",
+    "plugins": {
+        "proxy-rewrite" : {
+            "regex_uri": ["^/a/(.*)", "/${1}"]
+        }
+    },
+    "upstream": {
+        "service_name": "A-SERVICE",
+        "type": "roundrobin"
+    }
+}'
+
+$ curl http://127.0.0.1:9080/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d '
+{
+    "uri": "/b/*",
+    "plugins": {
+        "proxy-rewrite" : {
+            "regex_uri": ["^/b/(.*)", "/${1}"]
+        }
+    },
+    "upstream": {
+        "service_name": "B-SERVICE",
+        "type": "roundrobin"
+    }
+}'
# --> -[English](getting-started.md) +[English](../getting-started.md) # 快速入门指南 @@ -38,12 +38,12 @@ $ curl --location --request GET "https://httpbin.org/get?foo1=bar1&foo2=bar2" ## 前提 -- 本指南使用 docker 和 docker compose 来安装 Apache APISIX。 但是, 如果您已经以其他方式安装了 Apache APISIX ,您只需跳到 [第二步](getting-started-cn.md#第二步:-在-APISIX-中设置路由) 。 +- 本指南使用 docker 和 docker compose 来安装 Apache APISIX。 但是, 如果您已经以其他方式安装了 Apache APISIX ,您只需跳到 [第二步](getting-started.md#第二步:-在-APISIX-中设置路由) 。 - Curl:指南使用 Curl 命令进行 API 测试,但是您也可以使用您选择的任何其他工具( 例如 Postman )。 ## 第一步: 安装 APISIX -Apache APISIX 可以多种操作环境中安装。[如何安装文档](how-to-build-cn.md#installation-via-source-release) 显示了多个平台中的安装步骤。 +Apache APISIX 可以多种操作环境中安装。[如何安装文档](how-to-build.md#installation-via-source-release) 显示了多个平台中的安装步骤。 为了快速入门,让我们基于 docker 容器的安装方式进行安装。启动 Apache APISIX 服务,我们可以参照这个镜像文件[repository](https://github.com/apache/incubator-apisix-docker) 并切换到 example 文件夹下执行如下命令。 如下命令会启动 Apache APISIX 服务并默认在 9080 端口( https 请求是 9443 端口) 提供 admin API 接口服务 @@ -240,7 +240,7 @@ curl -i -X GET http://127.0.0.1:9080/samplePrefix/get?param1=foo¶m2=bar -H ' 到目前为止,已经通过使用 admin API 接口编排对 Apache APISIX 的 API 的调用。然而,Apache APISIX 还提供执行类似操作的一个 web 应用,就是web控制台。 可以在[repository](https://github.com/apache/incubator-apisix)中使用。控制台是直观的,您可以通过它编排同样的路由配置。 -![Dashboard](images/dashboard.png) +![Dashboard](../images/dashboard.png) ### 故障排查 diff --git a/doc/grpc-proxy-cn.md b/doc/zh-cn/grpc-proxy.md similarity index 96% rename from doc/grpc-proxy-cn.md rename to doc/zh-cn/grpc-proxy.md index 8c5a4e625a055..3f76fd8bec659 100644 --- a/doc/grpc-proxy-cn.md +++ b/doc/zh-cn/grpc-proxy.md @@ -17,7 +17,7 @@ # --> -[English](grpc-proxy.md) +[English](../grpc-proxy.md) # grpc-proxy @@ -35,7 +35,7 @@ 在指定 Route 中,代理 gRPC 服务接口: * 注意: 这个 Route 的属性 `service_protocol` 必须设置为 `grpc`; -* 注意: APISIX 使用 TLS 加密的 HTTP/2 暴露 gRPC 服务, 所以需要先 [配置 SSL 证书](https-cn.md); +* 注意: APISIX 使用 TLS 加密的 HTTP/2 暴露 gRPC 服务, 所以需要先 [配置 SSL 证书](https.md); * 下面例子所代理的 gRPC 
+下面是一个健康检查的例子:
如果当前HTTP响应状态码是其中任何一个,则将upstream节点设置为 `healthy` 状态。否则,请忽略此请求。 + * `passive.healthy.successes`: 如果upstream节点被检测成功(由 `passive.healthy.http_statuses` 定义)的次数超过 `successes` 次,则将该节点设置为 `healthy` 状态。 + + `unhealthy`的阀值字段: + * `passive.unhealthy.http_statuses`: 如果当前HTTP响应状态码是其中任何一个,则将upstream节点设置为 `unhealthy` 状态。否则,请忽略此请求。 + * `passive.unhealthy.tcp_failures`: 如果TCP通讯失败次数超过 `tcp_failures` 次,则将upstream节点设置为 `unhealthy` 状态。 + * `passive.unhealthy.timeouts`: 如果被动健康检查超时次数超过 `timeouts` 次,则将upstream节点设置为 `unhealthy` 状态。 + * `passive.unhealthy.http_failures`: 如果被动健康检查的HTTP请求失败(由 `passive.unhealthy.http_statuses` 定义)的次数超过 `http_failures`次,则将upstream节点设置为 `unhealthy` 状态。 diff --git a/doc/how-to-build-cn.md b/doc/zh-cn/how-to-build.md similarity index 93% rename from doc/how-to-build-cn.md rename to doc/zh-cn/how-to-build.md index 3f54eb0346204..303dcd680f014 100644 --- a/doc/how-to-build-cn.md +++ b/doc/zh-cn/how-to-build.md @@ -34,20 +34,20 @@ Apache APISIX 的运行环境需要 Nginx 和 etcd, 你需要先下载 Apache Release 源码包: ```shell -wget http://www.apache.org/dist/incubator/apisix/1.2/apache-apisix-1.2-incubating-src.tar.gz -tar zxvf apache-apisix-1.2-incubating-src.tar.gz +wget http://www.apache.org/dist/incubator/apisix/1.4/apache-apisix-1.4-incubating-src.tar.gz +tar zxvf apache-apisix-1.4-incubating-src.tar.gz ``` 安装运行时依赖的 Lua 库: ``` -cd apache-apisix-1.2-incubating +cd apache-apisix-1.4-incubating make deps ``` ### 通过 RPM 包安装(CentOS 7) ```shell -sudo yum install -y https://github.com/apache/incubator-apisix/releases/download/1.2/apisix-1.2-0.el7.noarch.rpm +sudo yum install -y https://github.com/apache/incubator-apisix/releases/download/1.4/apisix-1.4-0.el7.noarch.rpm ``` ### 通过 Luarocks 安装 (不支持 macOS) @@ -63,11 +63,11 @@ sudo sh -c "$(curl -fsSL https://raw.githubusercontent.com/apache/incubator-apis > 通过 Luarocks 安装指定的版本: ```shell -# 安装 apisix 的 1.2 版本 -sudo luarocks install --lua-dir=/path/openresty/luajit apisix 1.2 +# 安装 apisix 的 1.4 版本 +sudo luarocks install 
+# 添加 OpenResty 源
+sudo yum install yum-utils
+sudo yum-config-manager --add-repo https://openresty.org/package/fedora/openresty.repo
+
+# 安装 OpenResty, etcd 和 编译工具
+sudo yum install -y etcd openresty curl git gcc luarocks lua-devel
+
+# 开启 etcd server
+sudo etcd --enable-v2=true &
是一件比较复杂的事情,没办法简单地说明白。所以我们推荐你直接参考官方的安装文档。 + +http://openresty.org/en/linux-packages.html + +注意 +==== +- Apache APISIX 目前只支持 `v2` 版本的 etcd,但是最新版的 etcd (从 3.4 起)已经默认关闭了 `v2` 版本的功能。所以你需要添加启动参数 `--enable-v2=true` 来开启 `v2` 的功能,目前对 `v3` etcd 的开发工作已经启动,不久后便可投入使用。 + +- 如果你要想使用 Tengine 替代 OpenResty,请参考 [Install Tengine at Ubuntu](../../.travis/linux_tengine_runner.sh)。 diff --git a/doc/plugin-develop-cn.md b/doc/zh-cn/plugin-develop.md similarity index 95% rename from doc/plugin-develop-cn.md rename to doc/zh-cn/plugin-develop.md index c8a7663b8d301..85083ddf1e25a 100644 --- a/doc/plugin-develop-cn.md +++ b/doc/zh-cn/plugin-develop.md @@ -16,7 +16,7 @@ # limitations under the License. # --> -[English](plugin-develop.md) +[English](../plugin-develop.md) # 目录 @@ -95,6 +95,12 @@ plugins: # plugin list 注:先后顺序与执行顺序无关。 +特别需要注意的是,如果你的插件有新建自己的代码目录,那么就需要修改 Makefile 文件,新增创建文件夹的操作,比如: +``` +$(INSTALL) -d $(INST_LUADIR)/apisix/plugins/skywalking +$(INSTALL) apisix/plugins/skywalking/*.lua $(INST_LUADIR)/apisix/plugins/skywalking/ +``` + ## 配置描述与校验 定义插件的配置项,以及对应的 [Json Schema](https://json-schema.org) 描述,并完成对 json 的校验,这样方便对配置的数据规 diff --git a/doc/plugins-cn.md b/doc/zh-cn/plugins.md similarity index 97% rename from doc/plugins-cn.md rename to doc/zh-cn/plugins.md index 103cfb070810c..dc861fb6029e8 100644 --- a/doc/plugins-cn.md +++ b/doc/zh-cn/plugins.md @@ -17,7 +17,7 @@ # --> -[English](plugins.md) +[English](../plugins.md) ## 热加载 diff --git a/doc/zh-cn/plugins/authz-keycloak-cn.md b/doc/zh-cn/plugins/authz-keycloak-cn.md new file mode 100644 index 0000000000000..fe433c8fe6acd --- /dev/null +++ b/doc/zh-cn/plugins/authz-keycloak-cn.md @@ -0,0 +1,124 @@ + + +[English](../../plugins/authz-keycloak.md) + +# 目录 +- [**名字**](#名字) +- [**属性**](#属性) +- [**如何启用**](#如何启用) +- [**测试插件**](#测试插件) +- [**禁用插件**](#禁用插件) +- [**示例**](#示例) + +## 名字 + +`authz-keycloak` 是和 Keycloak Identity Server 配合使用的鉴权插件。Keycloak 是一种兼容 OAuth/OIDC 和 UMA 协议的身份认证服务器。尽管本插件是和 Keycloak 服务器配合开发的,但也应该能够适配任意兼容 OAuth/OIDC 和 UMA 
协议的身份认证服务器。 + +有关 Keycloak 的更多信息,可参考 [Keycloak Authorization Docs](https://www.keycloak.org/docs/latest/authorization_services) 查看更多信息。 + +## 属性 + +|名称 |选项 |描述| +|--------- |-------- |-----------| +| token_endpoint|必填 |接受 OAuth2 兼容 token 的接口,需要支持 `urn:ietf:params:oauth:grant-type:uma-ticket` 授权类型| +| grant_type |选填 |默认值为 `urn:ietf:params:oauth:grant-type:uma-ticket`| +| audience |选填 |客户端应用访问相应的资源服务器时所需提供的身份信息。当 permissions 参数有值时这个参数是必填的。| +| permissions |选填 |描述客户端应用所需访问的资源和权限范围的字符串。格式必须为:`RESOURCE_ID#SCOPE_ID`| +| timeout |选填 |与身份认证服务器的 http 连接的超时时间。默认值为 3 秒。| +| policy_enforcement_mode|必填 |只能是 ENFORCING 或 PERMISSIVE。| + +### 策略执行模式 + +定义了在处理身份认证请求时如何应用策略 + +**Enforcing** + +- (默认)如果资源没有绑定任何访问策略,请求默认会被拒绝。 + +**Permissive** + +- 如果资源没有绑定任何访问策略,请求会被允许。 + +## 如何启用 + +创建一个 `route` 对象,并在该 `route` 对象上启用 `authz-keycloak` 插件: + +```shell +curl http://127.0.0.1:9080/apisix/admin/routes/5 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/get", + "plugins": { + "authz-keycloak": { + "token_endpoint": "http://127.0.0.1:8090/auth/realms/{client_id}/protocol/openid-connect/token", + "permissions": ["resource name#scope name"], + "audience": "Client ID" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:8080": 1 + } + } +} +``` + +## 测试插件 + +```shell +curl http://127.0.0.1:9080/get -H 'Authorization: Bearer {JWT Token}' +``` + +## 禁用插件 + +在插件设置页面中删除相应的 json 配置即可禁用 `authz-keycloak` 插件。APISIX 的插件是热加载的,因此无需重启 APISIX 服务。 + +```shell +curl http://127.0.0.1:9080/apisix/admin/routes/5 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/get", + "plugins": { + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:8080": 1 + } + } +} +``` + +## 示例 + +请查看 authz-keycloak.t 中的单元测试来了解如何将身份认证策略与您的 API 工作流集成。运行以下 docker 镜像并访问 `http://localhost:8090` 来查看单元测试中绑定的访问策略: + +```bash +docker run -e KEYCLOAK_USER=admin -e KEYCLOAK_PASSWORD=123456 -p 8090:8080 sshniro/keycloak-apisix +``` + +下面这张截图显示了如何在 Keycloak 
服务器上配置访问策略: + +![Keycloak policy design](../../images/plugin/authz-keycloak.png) + +## 后续开发 + +- 目前 `authz-plugin` 仅支持通过定义资源名和访问权限范畴来应用 `route` 的访问策略。但是 Keycloak 官方适配的其他语言的客户端 (Java, JS) 还可以通过动态查询 Keycloak 路径以及懒加载身份资源的路径来支持路径匹配。未来版本的 `authz-plugin` 将会支持这项功能。 + +- 支持从 Keycloak JSON 文件中读取权限范畴和其他配置项。 diff --git a/doc/plugins/basic-auth-cn.md b/doc/zh-cn/plugins/basic-auth.md similarity index 96% rename from doc/plugins/basic-auth-cn.md rename to doc/zh-cn/plugins/basic-auth.md index e4e862d1ea83f..f4442f0f658b6 100644 --- a/doc/plugins/basic-auth-cn.md +++ b/doc/zh-cn/plugins/basic-auth.md @@ -17,7 +17,7 @@ # --> -# [English](basic-auth.md) +# [English](../../plugins/basic-auth.md) # 目录 @@ -58,10 +58,10 @@ curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f1 ``` 你可以使用浏览器打开 dashboard:`http://127.0.0.1:9080/apisix/dashboard/`,通过 web 界面来完成上面的操作,先增加一个 consumer: -![auth-1](../images/plugin/basic-auth-1.png) +![auth-1](../../images/plugin/basic-auth-1.png) 然后在 consumer 页面中添加 basic-auth 插件: -![auth-2](../images/plugin/basic-auth-2.png) +![auth-2](../../images/plugin/basic-auth-2.png) ### 2. 
创建 Route 或 Service 对象,并开启 `basic-auth` 插件。 diff --git a/doc/plugins/batch-requests-cn.md b/doc/zh-cn/plugins/batch-requests.md similarity index 94% rename from doc/plugins/batch-requests-cn.md rename to doc/zh-cn/plugins/batch-requests.md index dc06e862bdef0..bea6bd5ffc7d8 100644 --- a/doc/plugins/batch-requests-cn.md +++ b/doc/zh-cn/plugins/batch-requests.md @@ -17,7 +17,7 @@ # --> -# [English](batch-requests.md) +# [English](../../plugins/batch-requests.md) # 目录 @@ -32,6 +32,10 @@ `batch-requests` 插件可以一次接受多个请求并以 [http pipeline](https://en.wikipedia.org/wiki/HTTP_pipelining) 的方式在网关发起多个http请求,合并结果后再返回客户端,这在客户端需要访问多个接口时可以显著地提升请求性能。 +> **提示** +> +> 外层的 Http 请求头会自动设置到每一个独立请求中,如果独立请求中出现相同键值的请求头,那么只有独立请求的请求头会生效。 + ## 属性 无 @@ -67,7 +71,7 @@ #### HttpResponse | 参数名 | 类型 | 描述 | -| --- | --- | --- | --- | --- | +| --- | --- | --- | | status | Integer | Http 请求的状态码 | | reason | String | Http 请求的返回信息 | | body | String | Http 请求的响应体 | diff --git a/doc/zh-cn/plugins/consumer-restriction.md b/doc/zh-cn/plugins/consumer-restriction.md new file mode 100644 index 0000000000000..c1f3d1e359666 --- /dev/null +++ b/doc/zh-cn/plugins/consumer-restriction.md @@ -0,0 +1,128 @@ + + +[English](../../plugins/consumer-restriction.md) + +# 目录 +- [**名字**](#名字) +- [**属性**](#属性) +- [**如何启用**](#如何启用) +- [**测试插件**](#测试插件) +- [**禁用插件**](#禁用插件) + +## 名字 + +`consumer-restriction` 可以通过以下方式限制对服务或路线的访问,将 consumer 列入白名单或黑名单。 支持单个或多个 consumer。 + +## 属性 + +* `whitelist`: 可选,加入白名单的consumer +* `blacklist`: 可选,加入黑名单的consumer + +只能单独启用白名单或黑名单,两个不能一起使用。 + +## 如何启用 + +下面是一个示例,在指定的 route 上开启了 `consumer-restriction` 插件,限制consumer访问: + + +```shell +curl http://127.0.0.1:9080/apisix/admin/consumers/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +{ + "username": "jack1", + "plugins": { + "basic-auth": { + "username":"jack2019", + "password": "123456" + } + } +}' + +curl http://127.0.0.1:9080/apisix/admin/consumers/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +{ + "username": 
"jack2", + "plugins": { + "basic-auth": { + "username":"jack2020", + "password": "123456" + } + } +}' + +curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "basic-auth": {}, + "consumer-restriction": { + "whitelist": [ + "jack1" + ] + } + } +}' +``` + +## 测试插件 + +jack1 访问: + +```shell +$ curl -u jack2019:123456 http://127.0.0.1:9080/index.html +HTTP/1.1 200 OK +... +``` + +jack2 访问: + +```shell +$ curl -u jack2020:123456 http://127.0.0.1:9080/index.html -i +HTTP/1.1 403 Forbidden +... +{"message":"You are not allowed"} +``` + +## 禁用插件 + +当你想去掉 `consumer-restriction` 插件的时候,很简单,在插件的配置中把对应的 json 配置删除即可,无须重启服务,即刻生效: + +```shell +$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "basic-auth": {} + } +}' +``` + +现在就已移除 `consumer-restriction` 插件,其它插件的开启和移除也类似。 + diff --git a/doc/plugins/cors-cn.md b/doc/zh-cn/plugins/cors.md similarity index 99% rename from doc/plugins/cors-cn.md rename to doc/zh-cn/plugins/cors.md index 413dc95acc854..bc26df7307cf6 100644 --- a/doc/plugins/cors-cn.md +++ b/doc/zh-cn/plugins/cors.md @@ -17,7 +17,7 @@ # --> -# [English](cors.md) +# [English](../../plugins/cors.md) # 目录 diff --git a/doc/zh-cn/plugins/echo.md b/doc/zh-cn/plugins/echo.md new file mode 100644 index 0000000000000..71122fb4c309b --- /dev/null +++ b/doc/zh-cn/plugins/echo.md @@ -0,0 +1,92 @@ + + +# 目录 +- [**简介**](#简介) +- [**属性**](#属性) +- [**如何启用**](#如何启用) +- [**测试插件**](#测试插件) +- [**禁用插件**](#禁用插件) + +## 简介 + +echo 是一个有用的插件,可帮助用户尽可能全面地了解如何开发APISIX插件。 + + +该插件展示了如何在常见的 phase 中实现相应的功能,常见的 phase 有:init, rewrite, access, balancer, header filer, body filter 以及 log。 + +## 属性 + +|属性名称 |必选项 |描述| +|--------- 
|--------|-----------| +| before_body |可选| 在 body 属性之前添加的内容,如果 body 属性没有指定将添加在 upstream response body 之前。 | +| body |可选| 返回给客户端的响应内容,它将覆盖 upstream 返回的响应 body。 | +| after_body |可选| 在 body 属性之后添加的内容,如果 body 属性没有指定将在 upstream 响应 body 之后添加。 | + +## 如何启用 + +1. 为特定路由启用 echo 插件。 + +```shell +curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "plugins": { + "echo": { + "before_body": "before the body modification " + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" +}' +``` + +## 测试插件 + +* 成功: + +```shell +$ curl -i http://127.0.0.1:9080/hello +HTTP/1.1 200 OK +... +before the body modification hello world +``` + +## 禁用插件 + +当您要禁用`echo`插件时,这很简单,您可以在插件配置中删除相应的json配置,无需重新启动服务,它将立即生效: + +```shell +$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d value=' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/doc/plugins/fault-injection-cn.md b/doc/zh-cn/plugins/fault-injection.md similarity index 98% rename from doc/plugins/fault-injection-cn.md rename to doc/zh-cn/plugins/fault-injection.md index 29453a8115b43..d6155efe1b66e 100644 --- a/doc/plugins/fault-injection-cn.md +++ b/doc/zh-cn/plugins/fault-injection.md @@ -17,7 +17,7 @@ # --> -# [English](fault-injection.md) +# [English](../../plugins/fault-injection.md) # fault-injection diff --git a/doc/plugins/grpc-transcoding-cn.md b/doc/zh-cn/plugins/grpc-transcode.md similarity index 98% rename from doc/plugins/grpc-transcoding-cn.md rename to doc/zh-cn/plugins/grpc-transcode.md index 7bb4faa5d1202..b0c74601ffe93 100644 --- a/doc/plugins/grpc-transcoding-cn.md +++ b/doc/zh-cn/plugins/grpc-transcode.md @@ -17,9 +17,9 @@ # --> -# [English](grpc-transcoding.md) +# [English](../../plugins/grpc-transcode.md) -# grpc-transcoding +# 
grpc-transcode HTTP(s) -> APISIX -> gRPC server diff --git a/doc/zh-cn/plugins/http-logger.md b/doc/zh-cn/plugins/http-logger.md new file mode 100644 index 0000000000000..01e3ee10ca0e4 --- /dev/null +++ b/doc/zh-cn/plugins/http-logger.md @@ -0,0 +1,97 @@ + + +# 目录 +- [**定义**](#name) +- [**属性列表**](#attributes) +- [**如何开启**](#how-to-enable) +- [**测试插件**](#test-plugin) +- [**禁用插件**](#disable-plugin) + +## 定义 + +`http-logger` 是一个插件,可将Log数据请求推送到 HTTP / HTTPS 服务器。 + +这将提供将 Log 数据请求作为JSON对象发送到监视工具和其他 HTTP 服务器的功能。 + +## 属性列表 + +|名称 |必选项 |描述| +|--------- |--------|-----------| +| uri |必要的| 服务器的 URI | +| authorization |可选的| 授权头部 | +| keepalive |可选的|发送请求后保持连接活动的时间| +| name |可选的|标识 logger 的唯一标识符| +| batch_max_size |可选的|每批的最大大小,默认为 1000| +| inactive_timeout |可选的|刷新缓冲区的最大时间(以秒为单位),默认值为 5| +| buffer_duration |可选的|必须先处理批次中最旧条目的最长期限(以秒为单位),默认值为 5| +| max_retry_count |可选的|从处理管道中移除之前的最大重试次数,默认为 0| +| retry_delay |可选的|如果执行失败,则应延迟执行流程的秒数,默认为 1| + +## 如何开启 + +1. 这是有关如何为特定路由启用 http-logger 插件的示例。 + +```shell +curl http://127.0.0.1:9080/apisix/admin/routes/5 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "plugins": { + "http-logger": { + "uri": "127.0.0.1:80/postendpoint?param=1" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" +}' +``` + +## 测试插件 + +* 成功: + +```shell +$ curl -i http://127.0.0.1:9080/hello +HTTP/1.1 200 OK +... 
+hello, world +``` + +## 禁用插件 + +在插件配置中删除相应的 json 配置以禁用 http-logger。APISIX 插件是热重载的,因此无需重新启动 APISIX: + +```shell +$ curl http://127.0.0.1:2379/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d value=' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/doc/plugins/ip-restriction-cn.md b/doc/zh-cn/plugins/ip-restriction.md similarity index 94% rename from doc/plugins/ip-restriction-cn.md rename to doc/zh-cn/plugins/ip-restriction.md index c9f1fcfdb5cfe..89ecf5363e559 100644 --- a/doc/plugins/ip-restriction-cn.md +++ b/doc/zh-cn/plugins/ip-restriction.md @@ -17,7 +17,7 @@ # --> -[English](ip-restriction.md) +[English](../../plugins/ip-restriction.md) # 目录 - [**名字**](#名字) @@ -86,7 +86,7 @@ HTTP/1.1 403 Forbidden 当你想去掉 `ip-restriction` 插件的时候,很简单,在插件的配置中把对应的 json 配置删除即可,无须重启服务,即刻生效: ```shell -$ curl http://127.0.0.1:2379/v2/keys/apisix/routes/1 -X PUT -d value=' +$ curl http://127.0.0.1:2379/v2/keys/apisix/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d value=' { "uri": "/index.html", "plugins": {}, diff --git a/doc/plugins/jwt-auth-cn.md b/doc/zh-cn/plugins/jwt-auth.md similarity index 97% rename from doc/plugins/jwt-auth-cn.md rename to doc/zh-cn/plugins/jwt-auth.md index 8dfc5cb6b04ed..8a33e8eec6e5c 100644 --- a/doc/plugins/jwt-auth-cn.md +++ b/doc/zh-cn/plugins/jwt-auth.md @@ -17,7 +17,7 @@ # --> -[English](jwt-auth.md) +[English](../../plugins/jwt-auth.md) # 目录 - [**名字**](#名字) @@ -59,10 +59,10 @@ curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f1 }' ``` 你可以使用浏览器打开 dashboard:`http://127.0.0.1:9080/apisix/dashboard/`,通过 web 界面来完成上面的操作,先增加一个 consumer: -![](../images/plugin/jwt-auth-1.png) +![](../../images/plugin/jwt-auth-1.png) 然后在 consumer 页面中添加 jwt-auth 插件: -![](../images/plugin/jwt-auth-2.png) +![](../../images/plugin/jwt-auth-2.png) 2. 
创建 Route 或 Service 对象,并开启 `jwt-auth` 插件。 diff --git a/doc/zh-cn/plugins/kafka-logger.md b/doc/zh-cn/plugins/kafka-logger.md new file mode 100644 index 0000000000000..eecded5da01e0 --- /dev/null +++ b/doc/zh-cn/plugins/kafka-logger.md @@ -0,0 +1,127 @@ + + +# 目录 +- [**简介**](#简介) +- [**属性**](#属性) +- [**工作原理**](#工作原理) +- [**如何启用**](#如何启用) +- [**测试插件**](#测试插件) +- [**禁用插件**](#禁用插件) + +## 简介 + +`kafka-logger` 是一个插件,可用作ngx_lua nginx 模块的 Kafka 客户端驱动程序。 + +它可以将接口请求日志以 JSON 的形式推送给外部 Kafka 集群。如果在短时间内没有收到日志数据,请放心,它会在我们的批处理处理器中的计时器功能到期后自动发送日志。 + +有关 Apache APISIX 中 Batch-Processor 的更多信息,请参考。 +[Batch-Processor](../batch-processor.md) + +## 属性 + +|属性名称 |必选项 |描述| +|--------- |--------|-----------| +| broker_list |必须| 要推送的 kafka 的 broker 列表。| +| kafka_topic |必须| 要推送的 topic。| +| timeout |可选| 发送数据的超时时间。| +| key |必须| 用于加密消息的密钥。| +| name |必须| batch processor 的唯一标识。| +| batch_max_size |可选| 批量发送的消息最大数量,当到达该阀值后会立即发送消息| +| inactive_timeout |可选| 不活跃时间,如果在该时间范围内都没有消息写入缓冲区,那么会立即发送到 kafka。默认值: 5(s)| +| buffer_duration |可选| 缓冲周期,消息停留在缓冲区的最大时间,当超过该时间时会立即发送到 kafka。默认值: 60(s)| +| max_retry_count |可选| 最大重试次数。默认值: 0| +| retry_delay |可选| 重试间隔。默认值: 1(s)| + +## 工作原理 + +消息将首先写入缓冲区。 +当缓冲区超过`batch_max_size`时,它将发送到kafka服务器, +或每个`buffer_duration`刷新缓冲区。 + +如果成功,则返回“ true”。 +如果出现错误,则返回“ nil”,并带有描述错误的字符串(`buffer overflow`)。 + +##### Broker 列表 + +插件支持一次推送到多个 Broker,如下配置: + +```json +{ + "127.0.0.1":9092, + "127.0.0.1":9093 +} +``` + +## 如何启用 + +1. 为特定路由启用 kafka-logger 插件。 + +```shell +curl http://127.0.0.1:9080/apisix/admin/routes/5 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "plugins": { + "kafka-logger": { + "broker_list" : + { + "127.0.0.1":9092 + }, + "kafka_topic" : "test2", + "key" : "key1" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" +}' +``` + +## 测试插件 + +* 成功: + +```shell +$ curl -i http://127.0.0.1:9080/hello +HTTP/1.1 200 OK +... 
+hello, world +``` + +## 禁用插件 + +当您要禁用`kafka-logger`插件时,这很简单,您可以在插件配置中删除相应的json配置,无需重新启动服务,它将立即生效: + +```shell +$ curl http://127.0.0.1:2379/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d value=' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/doc/plugins/key-auth-cn.md b/doc/zh-cn/plugins/key-auth.md similarity index 96% rename from doc/plugins/key-auth-cn.md rename to doc/zh-cn/plugins/key-auth.md index e7b392c69bea5..0c2c75c7782e8 100644 --- a/doc/plugins/key-auth-cn.md +++ b/doc/zh-cn/plugins/key-auth.md @@ -17,7 +17,7 @@ # --> -[English](key-auth.md) +[English](../../plugins/key-auth.md) # 目录 - [**名字**](#名字) @@ -54,10 +54,10 @@ curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f1 ``` 你可以使用浏览器打开 dashboard:`http://127.0.0.1:9080/apisix/dashboard/`,通过 web 界面来完成上面的操作,先增加一个 consumer: -![](../images/plugin/key-auth-1.png) +![](../../images/plugin/key-auth-1.png) 然后在 consumer 页面中添加 key-auth 插件: -![](../images/plugin/key-auth-2.png) +![](../../images/plugin/key-auth-2.png) 2. 
创建 route 或 service 对象,并开启 `key-auth` 插件。 diff --git a/doc/plugins/limit-conn-cn.md b/doc/zh-cn/plugins/limit-conn.md similarity index 84% rename from doc/plugins/limit-conn-cn.md rename to doc/zh-cn/plugins/limit-conn.md index fb0b4d70b4edd..9bb7c3c76e2f1 100644 --- a/doc/plugins/limit-conn-cn.md +++ b/doc/zh-cn/plugins/limit-conn.md @@ -17,15 +17,15 @@ # --> -[English](limit-conn.md) +[English](../../plugins/limit-conn.md) # limit-conn -Apisix 的限制并发请求(或并发连接)插件。 +限制并发请求(或并发连接)插件。 ### 属性 -* `conn`: 允许的最大并发请求数。 超过这个比率的请求(低于“ conn” + “ burst”)将被延迟以符合这个阈值。 -* `burst`: 允许延迟的过多并发请求(或连接)的数量。 +* `conn`: 允许的最大并发请求数。超过 `conn` 的限制、但是低于 `conn` + `burst` 的请求,将被延迟处理。 +* `burst`: 允许被延迟处理的并发请求数。 * `default_conn_delay`: 默认的典型连接(或请求)的处理延迟时间。 * `key`: 用户指定的限制并发级别的关键字,可以是客户端IP或服务端IP。 @@ -33,7 +33,8 @@ Apisix 的限制并发请求(或并发连接)插件。 现在接受以下关键字: “remote_addr”(客户端的 IP),“server_addr”(服务器的 IP),请求头中的“ X-Forwarded-For/X-Real-IP”。 -* `rejected_code`: 当请求超过阈值时返回的 HTTP状态码, 默认值是503。 + **key 是可以被用户自定义的,只需要修改插件的一行代码即可完成。并没有在插件中放开是处于安全的考虑。** +* `rejected_code`: 当请求超过 `conn` + `burst` 这个阈值时,返回的 HTTP状态码,默认值是503。 #### 如何启用 @@ -64,10 +65,10 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 ``` 你可以使用浏览器打开 dashboard:`http://127.0.0.1:9080/apisix/dashboard/`,通过 web 界面来完成上面的操作,先增加一个 route: -![](../images/plugin/limit-conn-1.png) +![](../../images/plugin/limit-conn-1.png) 然后在 route 页面中添加 limit-conn 插件: -![](../images/plugin/limit-conn-2.png) +![](../../images/plugin/limit-conn-2.png) #### test plugin diff --git a/doc/plugins/limit-count-cn.md b/doc/zh-cn/plugins/limit-count.md similarity index 94% rename from doc/plugins/limit-count-cn.md rename to doc/zh-cn/plugins/limit-count.md index 1768ebc638e60..718c6af49e73d 100644 --- a/doc/plugins/limit-count-cn.md +++ b/doc/zh-cn/plugins/limit-count.md @@ -17,7 +17,7 @@ # --> -[English](limit-count.md) +[English](../../plugins/limit-count.md) # limit-count @@ -31,13 +31,16 @@ |count |必选 |指定时间窗口内的请求数量阈值| |time_window |必选 
|时间窗口的大小(以秒为单位),超过这个时间就会重置| |key |必选 |是用来做请求计数的依据,当前接受的 key 有: "remote_addr", "server_addr", "http_x_real_ip", "http_x_forwarded_for"。| -|rejected_code |可选 |T当请求超过阈值被拒绝时,返回的 HTTP 状态码,默认是 503| +|rejected_code |可选 |当请求超过阈值被拒绝时,返回的 HTTP 状态码,默认 503。| |policy |可选 |用于检索和增加限制的速率限制策略。可选的值有:`local`(计数器被以内存方式保存在节点本地,默认选项) 和 `redis`(计数器保存在 Redis 服务节点上,从而可以跨节点共享结果,通常用它来完成全局限速).| |redis_host |可选 |当使用 `redis` 限速策略时,该属性是 Redis 服务节点的地址。| |redis_port |可选 |当使用 `redis` 限速策略时,该属性是 Redis 服务节点的端口,默认端口 6379。| |redis_password|可选 |当使用 `redis` 限速策略时,该属性是 Redis 服务节点的密码。| |redis_timeout |可选 |当使用 `redis` 限速策略时,该属性是 Redis 服务节点以毫秒为单位的超时时间,默认是 1000 ms(1 秒)。| + +**key 是可以被用户自定义的,只需要修改插件的一行代码即可完成。并没有在插件中放开是出于安全的考虑。** + ### 示例 #### 开启插件 @@ -66,10 +69,10 @@ curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335 ``` 你可以使用浏览器打开 dashboard:`http://127.0.0.1:9080/apisix/dashboard/`,通过 web 界面来完成上面的操作,先增加一个 route: -![添加路由](../images/plugin/limit-count-1.png) +![添加路由](../../images/plugin/limit-count-1.png) 然后在 route 页面中添加 limit-count 插件: -![添加插件](../images/plugin/limit-count-2.png) +![添加插件](../../images/plugin/limit-count-2.png) 如果你需要一个集群级别的流量控制,我们可以借助 redis server 来完成。不同的 APISIX 节点之间将共享流量限速结果,实现集群流量限速。 diff --git a/doc/plugins/limit-req-cn.md b/doc/zh-cn/plugins/limit-req.md similarity index 75% rename from doc/plugins/limit-req-cn.md rename to doc/zh-cn/plugins/limit-req.md index b05148ca736a4..420359e2a4225 100644 --- a/doc/plugins/limit-req-cn.md +++ b/doc/zh-cn/plugins/limit-req.md @@ -17,7 +17,7 @@ # --> -# [English](limit-req.md) +# [English](../../plugins/limit-req.md) # limit-req @@ -25,10 +25,14 @@ ## 参数 -* `rate`:指定的请求速率(以秒为单位),请求速率超过 `rate` 但没有超过 (`rate` + `brust`)的请求会被加上延时 -* `burst`:请求速率超过 (`rate` + `brust`)的请求会被直接拒绝 -* `rejected_code`:当请求超过阈值被拒绝时,返回的 HTTP 状态码 -* `key`:是用来做请求计数的依据,当前接受的 key 有:"remote_addr"(客户端IP地址), "server_addr"(服务端 IP 地址), 请求头中的"X-Forwarded-For" 或 "X-Real-IP"。 +|名称 |可选项 |描述| +|--------- |--------|-----------| +|rate |必选|指定的请求速率(以秒为单位),请求速率超过 `rate`
但没有超过 (`rate` + `burst`)的请求会被加上延时。| +|burst |必选|请求速率超过 (`rate` + `burst`)的请求会被直接拒绝。| +| key |必选|是用来做请求计数的依据,当前接受的 key 有:"remote_addr"(客户端IP地址), "server_addr"(服务端 IP 地址), 请求头中的"X-Forwarded-For" 或 "X-Real-IP"。| +|rejected_code |可选|当请求超过阈值被拒绝时,返回的 HTTP 状态码,默认 503。| + +**key 是可以被用户自定义的,只需要修改插件的一行代码即可完成。并没有在插件中放开是出于安全的考虑。** ## 示例 @@ -60,11 +64,11 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 你可以使用浏览器打开 dashboard:`http://127.0.0.1:9080/apisix/dashboard/`,通过 web 界面来完成上面的操作,先增加一个 route: -![添加路由](../images/plugin/limit-req-1.png) +![添加路由](../../images/plugin/limit-req-1.png) 然后在 route 页面中添加 limit-req 插件: -![添加插件](../images/plugin/limit-req-2.png) +![添加插件](../../images/plugin/limit-req-2.png) ### 测试插件 diff --git a/doc/plugins/mqtt-proxy-cn.md b/doc/zh-cn/plugins/mqtt-proxy.md similarity index 98% rename from doc/plugins/mqtt-proxy-cn.md rename to doc/zh-cn/plugins/mqtt-proxy.md index 141e99897e262..d55a7d391d8e0 100644 --- a/doc/plugins/mqtt-proxy-cn.md +++ b/doc/zh-cn/plugins/mqtt-proxy.md @@ -17,7 +17,7 @@ # --> -[English](mqtt-proxy.md) +[English](../../plugins/mqtt-proxy.md) # 目录 diff --git a/doc/zh-cn/plugins/oauth.md b/doc/zh-cn/plugins/oauth.md new file mode 100644 index 0000000000000..3c6d0ec23ff13 --- /dev/null +++ b/doc/zh-cn/plugins/oauth.md @@ -0,0 +1,129 @@ + + +# 目录 + +- [**定义**](#定义) +- [**属性列表**](#属性列表) +- [**令牌自省**](#令牌自省) + +## 定义 + +OAuth 2 / Open ID Connect(OIDC)插件为 APISIX 提供身份验证和自省功能。 + +## 属性列表 + +|名称 |必选项 |描述| +|------- |----- |------| +|client_id |必要的 |OAuth 客户端 ID| +|client_secret |必要的 |OAuth 客户端 secret| +|discovery |必要的 |身份服务器的发现端点的 URL| +|realm |可选的 |用于认证的领域; 默认为apisix| +|bearer_only |可选的 |设置为“true”将检查请求中带有承载令牌的授权标头; 默认为`false`| +|logout_path |可选的 |默认是`/logout`| +|redirect_uri |可选的 |默认是 `ngx.var.request_uri`| +|timeout |可选的 |默认是 3 秒| +|ssl_verify |可选的 |默认是 `false`| +|introspection_endpoint |可选的 |身份服务器的令牌验证端点的 URL| +|introspection_endpoint_auth_method |可选的 |令牌自省的认证方法名称 | +|public_key |可选的 |验证令牌的公钥 |
+|token_signing_alg_values_expected |可选的 |用于对令牌进行签名的算法 | + +### 令牌自省 + +令牌自省通过针对 Oauth 2 授权服务器验证令牌来帮助验证请求。 +前提条件是,您应该在身份服务器中创建受信任的客户端,并生成用于自省的有效令牌(JWT)。 +下图显示了通过网关进行令牌自省的示例(成功)流程。 + +![token introspection](../../images/plugin/oauth-1.png) + +以下是 curl 命令,用于将插件启用到外部服务。 +通过自省请求标头中提供的令牌,此路由将保护 https://httpbin.org/get(echo 服务)。 + +```bash +curl http://127.0.0.1:9080/apisix/admin/routes/5 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri":"/get", + "plugins":{ + "proxy-rewrite":{ + "scheme":"https" + }, + "openid-connect":{ + "client_id":"api_six_client_id", + "client_secret":"client_secret_code", + "discovery":"full_URL_of_the_discovery_endpoint", + "introspection_endpoint":"full_URL_of_introspection_endpoint", + "bearer_only":true, + "realm":"master", + "introspection_endpoint_auth_method":"client_secret_basic" + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "httpbin.org:443":1 + } + } +}' +``` + +以下命令可用于访问新路由。 + +```bash +curl -i -X GET http://127.0.0.1:9080/get -H "Host: httpbin.org" -H "Authorization: Bearer {replace_jwt_token}" +``` + +#### 公钥自省 + +您还可以提供 JWT 令牌的公钥来验证令牌。 如果您提供了公共密钥和令牌自省端点,则将执行公共密钥工作流,而不是通过身份服务器进行验证。如果要减少额外的网络呼叫并加快过程,可以使用此方法。 + +以下配置显示了如何向路由添加公钥自省。 + +```bash +curl http://127.0.0.1:9080/apisix/admin/routes/5 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri":"/get", + "plugins":{ + "proxy-rewrite":{ + "scheme":"https" + }, + "openid-connect":{ + "client_id":"api_six_client_id", + "client_secret":"client_secret_code", + "discovery":"full_URL_of_the_discovery_endpoint", + "bearer_only":true, + "realm":"master", + "token_signing_alg_values_expected":"RS256", + "public_key":"-----BEGIN CERTIFICATE----- + {public_key} + -----END CERTIFICATE-----" + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "httpbin.org:443":1 + } + } +}' +``` + +## 故障排除 + +如果 APISIX 无法解析/连接到身份提供者,请检查/修改DNS设置(`conf / config.yaml`)。 diff --git a/doc/plugins/prometheus-cn.md b/doc/zh-cn/plugins/prometheus.md similarity 
index 93% rename from doc/plugins/prometheus-cn.md rename to doc/zh-cn/plugins/prometheus.md index 1dd814285cf11..946d113d10a6c 100644 --- a/doc/plugins/prometheus-cn.md +++ b/doc/zh-cn/plugins/prometheus.md @@ -17,7 +17,7 @@ # --> -[English](prometheus.md) +[English](../../plugins/prometheus.md) # prometheus @@ -51,11 +51,11 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 你可以使用浏览器打开 dashboard:`http://127.0.0.1:9080/apisix/dashboard/`,通过 web 界面来完成上面的操作,先增加一个 route: -![](../images/plugin/prometheus-1.png) +![](../../images/plugin/prometheus-1.png) 然后在 route 页面中添加 prometheus 插件: -![](../images/plugin/prometheus-2.png) +![](../../images/plugin/prometheus-2.png) ## 如何提取指标数据 @@ -78,9 +78,9 @@ scrape_configs: 我们也可以在 prometheus 控制台中去检查状态: -![](../../doc/images/plugin/prometheus01.png) +![](../../images/plugin/prometheus01.png) -![](../../doc/images/plugin/prometheus02.png) +![](../../images/plugin/prometheus02.png) ### Grafana 面板 @@ -89,11 +89,11 @@ scrape_configs: 你可以到 [Grafana meta](https://grafana.com/grafana/dashboards/11719) 下载 `Grafana` 元数据. 
-![](../../doc/images/plugin/grafana_1.png) +![](../../images/plugin/grafana_1.png) -![](../../doc/images/plugin/grafana_2.png) +![](../../images/plugin/grafana_2.png) -![](../../doc/images/plugin/grafana_3.png) +![](../../images/plugin/grafana_3.png) ### 可有的指标 diff --git a/doc/plugins/proxy-cache-cn.md b/doc/zh-cn/plugins/proxy-cache.md similarity index 94% rename from doc/plugins/proxy-cache-cn.md rename to doc/zh-cn/plugins/proxy-cache.md index 95f3bdaf7d0f4..9381ea883ae9f 100644 --- a/doc/plugins/proxy-cache-cn.md +++ b/doc/zh-cn/plugins/proxy-cache.md @@ -17,7 +17,7 @@ # --> -[English](proxy-cache.md) +[English](../../plugins/proxy-cache.md) # proxy-cache @@ -48,7 +48,7 @@ 示例1:为特定路由启用 `proxy-cache` 插件: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -X PUT -d ' +curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "proxy-cache": { @@ -130,7 +130,7 @@ Server: APISIX web server 移除插件配置中相应的 JSON 配置可立即禁用该插件,无需重启服务: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -X PUT -d ' +curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", "plugins": {}, diff --git a/doc/plugins/proxy-mirror-cn.md b/doc/zh-cn/plugins/proxy-mirror.md similarity index 89% rename from doc/plugins/proxy-mirror-cn.md rename to doc/zh-cn/plugins/proxy-mirror.md index 4b3f0730cc210..e4c4d3d3b77b8 100644 --- a/doc/plugins/proxy-mirror-cn.md +++ b/doc/zh-cn/plugins/proxy-mirror.md @@ -17,7 +17,7 @@ # --> -[English](proxy-mirror.md) +[English](../../plugins/proxy-mirror.md) # proxy-mirror @@ -38,7 +38,7 @@ 示例1:为特定路由启用 `proxy-mirror` 插件: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -X PUT -d ' +curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "proxy-mirror": { @@ -77,7 +77,7 @@ hello world 移除插件配置中相应的 JSON 配置可立即禁用该插件,无需重启服务: ```shell -curl 
http://127.0.0.1:9080/apisix/admin/routes/1 -X PUT -d ' +curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", "plugins": {}, diff --git a/doc/plugins/proxy-rewrite-cn.md b/doc/zh-cn/plugins/proxy-rewrite.md similarity index 98% rename from doc/plugins/proxy-rewrite-cn.md rename to doc/zh-cn/plugins/proxy-rewrite.md index f714ad5f4f5b3..45a6c95040ea5 100644 --- a/doc/plugins/proxy-rewrite-cn.md +++ b/doc/zh-cn/plugins/proxy-rewrite.md @@ -17,7 +17,7 @@ # --> -[English](proxy-rewrite.md) +[English](../../plugins/proxy-rewrite.md) # proxy-rewrite 上游代理信息重写插件。 diff --git a/doc/plugins/redirect-cn.md b/doc/zh-cn/plugins/redirect.md similarity index 74% rename from doc/plugins/redirect-cn.md rename to doc/zh-cn/plugins/redirect.md index ba7d94ce35eb6..ead153a444212 100644 --- a/doc/plugins/redirect-cn.md +++ b/doc/zh-cn/plugins/redirect.md @@ -17,7 +17,7 @@ # --> -[English](redirect.md) +[English](../../plugins/redirect.md) # redirect @@ -27,8 +27,9 @@ URI 重定向插件。 |名称 |必须|描述| |------- |-----|------| -|uri |是| 可以包含 Nginx 变量的 URI,例如:`/test/index.html`, `$uri/index.html`。你可以通过类似于 `$ {xxx}` 的方式引用变量,以避免产生歧义,例如:`${uri}foo/index.html`。若你需要保留 `$` 字符,那么使用如下格式:`/\$foo/index.html`。| -|ret_code|否|请求响应码,默认值为 `302`。| +|uri |是,与 `http_to_https` 二选一| 可以包含 Nginx 变量的 URI,例如:`/test/index.html`, `$uri/index.html`。你可以通过类似于 `$ {xxx}` 的方式引用变量,以避免产生歧义,例如:`${uri}foo/index.html`。若你需要保留 `$` 字符,那么使用如下格式:`/\$foo/index.html`。| +|ret_code|否,只和 `uri` 配置使用。|请求响应码,默认值为 `302`。| +|http_to_https|是,与 `uri` 二选一|布尔值,默认是 `false`。当设置为 `true` 并且请求是 http 时,会自动 301 重定向为 https,uri 保持不变| ### 示例 @@ -94,6 +95,21 @@ Location: /test/default.html 我们可以检查响应码和响应头中的 `Location` 参数,它表示该插件已启用。 +``` + +下面是一个实现 http 到 https 跳转的示例: +```shell +curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/hello", + "plugins": { + "redirect": { + "http_to_https": true + } + } +}' +``` + #### 禁用插件
移除插件配置中相应的 JSON 配置可立即禁用该插件,无需重启服务: diff --git a/doc/plugins/response-rewrite-cn.md b/doc/zh-cn/plugins/response-rewrite.md similarity index 91% rename from doc/plugins/response-rewrite-cn.md rename to doc/zh-cn/plugins/response-rewrite.md index aa79bc2c5b9ae..83c5aac51c96c 100644 --- a/doc/plugins/response-rewrite-cn.md +++ b/doc/zh-cn/plugins/response-rewrite.md @@ -17,27 +17,29 @@ # --> -[English](response-rewrite.md) +[English](../../plugins/response-rewrite.md) + # response-rewrite 该插件支持修改上游服务返回的 body 和 header 信息。 使用场景: 1、可以设置 `Access-Control-Allow-*` 等 header 信息,来实现 CORS (跨域资源共享)的功能。 -2、另外也可以通过配置 status_code 和 header 里面的 Location 来实现重定向,当然如果只是需要重定向功能,最好使用 [redirect](redirect-cn.md) 插件。 +2、另外也可以通过配置 status_code 和 header 里面的 Location 来实现重定向,当然如果只是需要重定向功能,最好使用 [redirect](redirect.md) 插件。 + +## 配置参数 -#### 配置参数 |名字 |可选|说明| |------- |-----|------| -|status_code |可选| 修改上游返回状态码| +|status_code |可选| 修改上游返回状态码,默认保留原始响应代码。| |body |可选| 修改上游返回的 `body` 内容,如果设置了新内容,header 里面的 content-length 字段也会被去掉| |body_base64 |可选| 布尔类型,描述 `body` 字段是否需要 base64 解码之后再返回给客户端,用在某些图片和 Protobuffer 场景| |headers |可选| 返回给客户端的 `headers`,这里可以设置多个。头信息如果存在将重写,不存在则添加。想要删除某个 header 的话,把对应的值设置为空字符串即可| +## 示例 -### 示例 +### 开启插件 -#### 开启插件 下面是一个示例,在指定的 route 上开启了 `response rewrite` 插件: ```shell @@ -63,7 +65,8 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f1 }' ``` -#### 测试插件 +### 测试插件 + 基于上述配置进行测试: ```shell @@ -71,7 +74,8 @@ curl -X GET -i http://127.0.0.1:9080/test/index.html ``` 如果看到返回的头部信息和内容都被修改了,即表示 `response rewrite` 插件生效了。 -``` + +```shell HTTP/1.1 200 OK Date: Sat, 16 Nov 2019 09:15:12 GMT Transfer-Encoding: chunked diff --git a/doc/plugins/serverless-cn.md b/doc/zh-cn/plugins/serverless.md similarity index 98% rename from doc/plugins/serverless-cn.md rename to doc/zh-cn/plugins/serverless.md index fab9ad9e5a0b7..7ef432662f006 100644 --- a/doc/plugins/serverless-cn.md +++ b/doc/zh-cn/plugins/serverless.md @@ -17,7 +17,7 @@ # --> -[English](serverless.md) 
+[English](../../plugins/serverless.md) # serverless diff --git a/doc/zh-cn/plugins/skywalking.md b/doc/zh-cn/plugins/skywalking.md new file mode 100644 index 0000000000000..91ae3c1ebffb6 --- /dev/null +++ b/doc/zh-cn/plugins/skywalking.md @@ -0,0 +1,192 @@ + + +[English](../../plugins/skywalking.md) + +# 目录 +- [目录](#目录) + - [名字](#名字) + - [属性](#属性) + - [如何启用](#如何启用) + - [测试插件](#测试插件) + - [运行 Skywalking 实例](#运行-Skywalking-实例) + - [禁用插件](#禁用插件) + - [上游服务是java的SpringBoot示例代码](#上游服务是java的SpringBoot示例代码) + +## 名字 + +`Skywalking`(https://github.com/apache/skywalking) 是一个开源的服务跟踪插件。 + +服务端目前支持http和grpc两种协议,在apisix中目前只支持http协议 + +## 属性 + +* `endpoint`: Skywalking 的 http 节点,例如`http://127.0.0.1:12800`。 +* `sample_ratio`: 监听的比例,最小为0.00001,最大为1。 +* `service_name`: 可选参数,标记当前服务的名称,默认值是`APISIX`。 + +## 如何启用 + +下面是一个示例,在指定的 route 上开启了 skywalking 插件: + +```shell +curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "methods": ["GET"], + "uris": [ + "/uid/*" + ], + "plugins": { + "skywalking": { + "endpoint": "http://10.110.149.175:12800", + "sample_ratio": 1, + "service_name": "APISIX_SERVER" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "10.110.149.175:8089": 1 + } + } +}' +``` + +你可以使用浏览器打开 dashboard:`http://127.0.0.1:9080/apisix/dashboard/`,通过 web 界面来完成上面的操作,先增加一个 route: + +![](../../images/plugin/skywalking-1.png) + +然后在 route 页面中添加 skywalking 插件: + +![](../../images/plugin/skywalking-2.png) + +## 测试插件 + +### 运行 Skywalking 实例 + +#### 例子: +1. 启动Skywalking Server: + - 默认使用H2存储,直接启动skywalking即可 + ``` + sudo docker run --name skywalking -d -p 1234:1234 -p 11800:11800 -p 12800:12800 --restart always apache/skywalking-oap-server + ``` + + - 如果使用elasticsearch存储 + 1. 则需要先安装elasticsearch: + ``` + sudo docker run -d --name elasticsearch -p 9200:9200 -p 9300:9300 --restart always -e "discovery.type=single-node" elasticsearch:6.7.2 + + ``` + 2. 
安装 ElasticSearch管理界面elasticsearch-hq + ``` + sudo docker run -d --name elastic-hq -p 5000:5000 --restart always elastichq/elasticsearch-hq + ``` + 3. 启动skywalking: + ``` + sudo docker run --name skywalking -d -p 1234:1234 -p 11800:11800 -p 12800:12800 --restart always --link elasticsearch:elasticsearch -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server + ``` +2. Skywalking管理系统: + 1. 启动管理系统: + ``` + sudo docker run --name skywalking-ui -d -p 8080:8080 --link skywalking:skywalking -e SW_OAP_ADDRESS=skywalking:12800 --restart always apache/skywalking-ui + ``` + 2. 打开管理页面 + 在浏览器里面输入http://10.110.149.175:8080,出现了如下界面,则表示安装成功 + ![](../../images/plugin/skywalking-3.png) + +3. 测试示例: + - 通过访问apisix,访问上游服务 + + ```bash + $ curl -v http://10.110.149.192:9080/uid/12 + HTTP/1.1 200 OK + OK + ... + ``` + - 打开浏览器,访问 Skywalking 的 web 页面: + ``` + http://10.110.149.175:8080/ + ``` + 可以看到访问拓扑图\ + ![](../../images/plugin/skywalking-4.png)\ + 可以看到服务追踪图\ + ![](../../images/plugin/skywalking-5.png) +## 禁用插件 + +当你想去掉插件的时候,很简单,在插件的配置中把对应的 json 配置删除即可,无须重启服务,即刻生效: + +```shell +$ curl http://127.0.0.1:2379/v2/keys/apisix/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d value=' +{ + "methods": ["GET"], + "uris": [ + "/uid/*" + ], + "plugins": { + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "10.110.149.175:8089": 1 + } + } +}' +``` + +现在就已经移除了 Skywalking 插件了。其他插件的开启和移除也是同样的方法。 + + +## 上游服务是java的SpringBoot示例代码 + +```java +package com.lenovo.ai.controller; + +import org.springframework.web.bind.annotation.PathVariable; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RestController; +import javax.servlet.http.HttpServletRequest; + +/** + * @author cyxinda + * @create 2020-05-29 14:02 + * @desc skywalking测试中央控制层 + **/ +@RestController +public class TestController { + @RequestMapping("/uid/{count}") + public String getUidList(@PathVariable("count") 
String countStr, HttpServletRequest request) { + System.out.println("counter:::::"+countStr); + return "OK"; + } +} +``` +启动服务的时候,需要配置skywalking agent, +修改agent/config/agent.config中的配置 +``` +agent.service_name=yourservername +collector.backend_service=10.110.149.175:11800 +``` +启动服务脚本: +``` +nohup java -javaagent:/root/skywalking/app/agent/skywalking-agent.jar \ +-jar /root/skywalking/app/app.jar \ +--server.port=8089 \ +2>&1 > /root/skywalking/app/logs/nohup.log & +``` + diff --git a/doc/zh-cn/plugins/syslog.md b/doc/zh-cn/plugins/syslog.md new file mode 100644 index 0000000000000..486d1606424cb --- /dev/null +++ b/doc/zh-cn/plugins/syslog.md @@ -0,0 +1,105 @@ + + +# 摘要 +- [**定义**](#name) +- [**属性列表**](#attributes) +- [**如何开启**](#how-to-enable) +- [**测试插件**](#test-plugin) +- [**禁用插件**](#disable-plugin) + + +## 定义 + +`sys` 是一个将Log data请求推送到Syslog的插件。 + +这将提供将Log数据请求作为JSON对象发送的功能。 + +## 属性列表 + +|属性名称 |必选项 |描述| +|--------- |-------- |-----------| +|host |必要的 |IP地址或主机名。| +|port |必要的 |目标上游端口。| +|timeout |可选的 |上游发送数据超时。| +|tls |可选的 |布尔值,用于控制是否执行SSL验证。| +|flush_limit |可选的 |如果缓冲的消息的大小加上当前消息的大小达到(> =)此限制(以字节为单位),则缓冲的日志消息将被写入日志服务器。默认为4096(4KB)。| +|drop_limit |可选的 |如果缓冲的消息的大小加上当前消息的大小大于此限制(以字节为单位),则由于缓冲区大小有限,当前的日志消息将被丢弃。默认drop_limit为1048576(1MB)。| +|sock_type|可选的 |用于传输层的IP协议类型。可以是“ tcp”或“ udp”。默认值为“ tcp”。| +|max_retry_times|可选的 |连接到日志服务器失败或将日志消息发送到日志服务器失败后的最大重试次数。| +|retry_interval|可选的 |重试连接到日志服务器或重试向日志服务器发送日志消息之前的时间延迟(以毫秒为单位),默认为100(0.1s)。| +|pool_size |可选的 |sock:keepalive使用的Keepalive池大小。默认为10。| + +## 如何开启 + +1. 
下面例子展示了如何为指定路由开启 `sys-logger` 插件的。 + +```shell +curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "username": "foo", + "plugins": { + "plugins": { + "syslog": { + "host" : "127.0.0.1", + "port" : 5044, + "flush_limit" : 1 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" + } +}' +``` + +## 测试插件 + +* 成功的情况: + +```shell +$ curl -i http://127.0.0.1:9080/hello +HTTP/1.1 200 OK +... +hello, world +``` + +## 禁用插件 + + +想要禁用“sys-logger”插件,是非常简单的,将对应的插件配置从json配置删除,就会立即生效,不需要重新启动服务: + +```shell +$ curl http://127.0.0.1:2379/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d value=' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/doc/plugins/tcp-logger-cn.md b/doc/zh-cn/plugins/tcp-logger.md similarity index 84% rename from doc/plugins/tcp-logger-cn.md rename to doc/zh-cn/plugins/tcp-logger.md index 27da9ffce738b..652f2afadabbb 100644 --- a/doc/plugins/tcp-logger-cn.md +++ b/doc/zh-cn/plugins/tcp-logger.md @@ -18,19 +18,24 @@ --> # 摘要 + - [**定义**](#name) - [**属性列表**](#attributes) - [**如何开启**](#how-to-enable) - [**测试插件**](#test-plugin) - [**禁用插件**](#disable-plugin) - ## 定义 `tcp-logger` 是用于将日志数据发送到TCP服务的插件。 以实现将日志数据以JSON格式发送到监控工具或其它TCP服务的能力。 +该插件提供了将Log Data作为批处理推送到外部TCP服务器的功能。如果您没有收到日志数据,请放心一些时间,它会在我们的批处理处理器中的计时器功能到期后自动发送日志。 + +有关Apache APISIX中Batch-Processor的更多信息,请参考。 +[Batch-Processor](../batch-processor.md) + ## 属性列表 |属性名称 |必选项 |描述| @@ -41,7 +46,6 @@ | tls |可选的|布尔值,用于控制是否执行SSL验证。| | tls_options |可选的|TLS 选项| - ## 如何开启 1. 
下面例子展示了如何为指定路由开启 `tcp-logger` 插件的。 @@ -79,11 +83,10 @@ hello, world ## 禁用插件 - 想要禁用“tcp-logger”插件,是非常简单的,将对应的插件配置从json配置删除,就会立即生效,不需要重新启动服务: ```shell -$ curl http://127.0.0.1:2379/apisix/admin/routes/1 -X PUT -d value=' +$ curl http://127.0.0.1:2379/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d value=' { "methods": ["GET"], "uri": "/hello", diff --git a/doc/plugins/udp-logger-cn.md b/doc/zh-cn/plugins/udp-logger.md similarity index 84% rename from doc/plugins/udp-logger-cn.md rename to doc/zh-cn/plugins/udp-logger.md index 148a2afd52422..f129dc2f8b645 100644 --- a/doc/plugins/udp-logger-cn.md +++ b/doc/zh-cn/plugins/udp-logger.md @@ -18,19 +18,24 @@ --> # 摘要 + - [**定义**](#name) - [**属性列表**](#attributes) - [**如何开启**](#how-to-enable) - [**测试插件**](#test-plugin) - [**禁用插件**](#disable-plugin) - ## 定义 `udp-logger` 是用于将日志数据发送到UDP服务的插件。 以实现将日志数据以JSON格式发送到监控工具或其它UDP服务的能力。 +此插件提供了将批处理数据批量推送到外部UDP服务器的功能。如果您没有收到日志数据,请放心一些时间,它会在我们的批处理处理器中的计时器功能到期后自动发送日志 + +有关Apache APISIX中Batch-Processor的更多信息,请参考。 +[Batch-Processor](../../batch-processor.md) + ## 属性列表 |属性名称 |必选项 |描述| @@ -39,7 +44,6 @@ | port |必要的| 目标端口。| | timeout |可选的|发送数据超时间。| - ## 如何开启 1. 
下面例子展示了如何为指定路由开启 `udp-logger` 插件的。 @@ -77,11 +81,10 @@ hello, world ## 禁用插件 - 想要禁用“udp-logger”插件,是非常简单的,将对应的插件配置从json配置删除,就会立即生效,不需要重新启动服务: ```shell -$ curl http://127.0.0.1:2379/apisix/admin/routes/1 -X PUT -d value=' +$ curl http://127.0.0.1:2379/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d value=' { "methods": ["GET"], "uri": "/hello", diff --git a/doc/plugins/wolf-rbac-cn.md b/doc/zh-cn/plugins/wolf-rbac.md similarity index 98% rename from doc/plugins/wolf-rbac-cn.md rename to doc/zh-cn/plugins/wolf-rbac.md index 1b9996c91eccc..0ed3f5cd42fc3 100644 --- a/doc/plugins/wolf-rbac-cn.md +++ b/doc/zh-cn/plugins/wolf-rbac.md @@ -17,7 +17,7 @@ # --> -[English](wolf-rbac.md) +[English](../../plugins/wolf-rbac.md) # 目录 @@ -70,10 +70,10 @@ curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f ``` 你可以使用浏览器打开 dashboard:`http://127.0.0.1:9080/apisix/dashboard/`,通过 web 界面来完成上面的操作,先增加一个 consumer: -![](../images/plugin/wolf-rbac-1.png) +![](../../images/plugin/wolf-rbac-1.png) 然后在 consumer 页面中添加 wolf-rbac 插件: -![](../images/plugin/wolf-rbac-2.png) +![](../../images/plugin/wolf-rbac-2.png) 注意: 上面填写的 `appid` 需要在wolf控制台中已经存在的. 
diff --git a/doc/plugins/zipkin-cn.md b/doc/zh-cn/plugins/zipkin.md similarity index 93% rename from doc/plugins/zipkin-cn.md rename to doc/zh-cn/plugins/zipkin.md index ad53dc2aa569b..fb7fc096a34ca 100644 --- a/doc/plugins/zipkin-cn.md +++ b/doc/zh-cn/plugins/zipkin.md @@ -17,7 +17,7 @@ # --> -[English](zipkin.md) +[English](../../plugins/zipkin.md) # 目录 - [**名字**](#名字) @@ -67,11 +67,11 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f1 你可以使用浏览器打开 dashboard:`http://127.0.0.1:9080/apisix/dashboard/`,通过 web 界面来完成上面的操作,先增加一个 route: -![](../images/plugin/zipkin-1.png) +![](../../images/plugin/zipkin-1.png) 然后在 route 页面中添加 zipkin 插件: -![](../images/plugin/zipkin-2.png) +![](../../images/plugin/zipkin-2.png) ## 测试插件 @@ -97,16 +97,16 @@ HTTP/1.1 200 OK http://127.0.0.1:9411/zipkin ``` -![](../../doc/images/plugin/zipkin-1.jpg) +![](../../images/plugin/zipkin-1.jpg) -![](../../doc/images/plugin/zipkin-2.jpg) +![](../../images/plugin/zipkin-2.jpg) ## 禁用插件 当你想去掉插件的时候,很简单,在插件的配置中把对应的 json 配置删除即可,无须重启服务,即刻生效: ```shell -$ curl http://127.0.0.1:2379/v2/keys/apisix/routes/1 -X PUT -d value=' +$ curl http://127.0.0.1:2379/v2/keys/apisix/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d value=' { "methods": ["GET"], "uri": "/index.html", diff --git a/doc/profile-cn.md b/doc/zh-cn/profile.md similarity index 100% rename from doc/profile-cn.md rename to doc/zh-cn/profile.md diff --git a/doc/stand-alone-cn.md b/doc/zh-cn/stand-alone.md similarity index 99% rename from doc/stand-alone-cn.md rename to doc/zh-cn/stand-alone.md index 0da4b09120023..34127ce0c2de7 100644 --- a/doc/stand-alone-cn.md +++ b/doc/zh-cn/stand-alone.md @@ -17,7 +17,7 @@ # --> -[English](stand-alone.md) +[English](../stand-alone.md) ## Stand-alone mode diff --git a/doc/stream-proxy-cn.md b/doc/zh-cn/stream-proxy.md similarity index 96% rename from doc/stream-proxy-cn.md rename to doc/zh-cn/stream-proxy.md index 0a413d73b3bf5..afb9fd2988262 100644 --- 
a/doc/stream-proxy-cn.md +++ b/doc/zh-cn/stream-proxy.md @@ -17,7 +17,7 @@ # --> -[English](stream-proxy.md) +[English](../stream-proxy.md) # Stream 代理 @@ -59,7 +59,7 @@ curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f03 ``` 例子中 APISIX 对客户端IP为 `127.0.0.1` 的请求代理转发到上游主机 `127.0.0.1:1995`。 -更多用例,请参照 [test case](../t/stream-node/sanity.t). +更多用例,请参照 [test case](../../t/stream-node/sanity.t). ## 更多限制选项 diff --git a/kubernetes/README.md b/kubernetes/README.md index 3d914e7d3046d..30f299d99415b 100644 --- a/kubernetes/README.md +++ b/kubernetes/README.md @@ -16,6 +16,12 @@ # limitations under the License. # --> +### kubernetes + +There are some yaml files for deploying apisix in Kubernetes. + +### Prerequisites +- Install etcd ### Usage diff --git a/kubernetes/apisix-gw-config-cm.yaml b/kubernetes/apisix-gw-config-cm.yaml index 67833f09a21e5..05c554e2aa385 100644 --- a/kubernetes/apisix-gw-config-cm.yaml +++ b/kubernetes/apisix-gw-config-cm.yaml @@ -144,6 +144,7 @@ data: - fault-injection - udp-logger - wolf-rbac + - consumer-restriction stream_plugins: - mqtt-proxy diff --git a/rockspec/apisix-1.3-0.rockspec b/rockspec/apisix-1.3-0.rockspec new file mode 100644 index 0000000000000..99dddc51f4a1d --- /dev/null +++ b/rockspec/apisix-1.3-0.rockspec @@ -0,0 +1,72 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +package = "apisix" +version = "1.3-0" +supported_platforms = {"linux", "macosx"} + +source = { + url = "git://github.com/apache/incubator-apisix", + tag = "1.3", +} + +description = { + summary = "Apache APISIX(incubating) is a cloud-native microservices API gateway, delivering the ultimate performance, security, open source and scalable platform for all your APIs and microservices.", + homepage = "https://github.com/apache/incubator-apisix", + license = "Apache License 2.0", +} + +dependencies = { + "lua-resty-template = 1.9", + "lua-resty-etcd = 0.9", + "lua-resty-balancer = 0.02rc5", + "lua-resty-ngxvar = 0.5", + "lua-resty-jit-uuid = 0.0.7", + "lua-resty-healthcheck-api7 = 2.2.0", + "lua-resty-jwt = 0.2.0", + "lua-resty-cookie = 0.1.0", + "lua-resty-session = 2.24", + "opentracing-openresty = 0.1", + "lua-resty-radixtree = 1.8", + "lua-protobuf = 0.3.1", + "lua-resty-openidc = 1.7.2-1", + "luafilesystem = 1.7.0-2", + "lua-tinyyaml = 0.1", + "lua-resty-prometheus = 1.0", + "jsonschema = 0.8", + "lua-resty-ipmatcher = 0.6", + "lua-resty-kafka = 0.07", + "lua-resty-logger-socket = 2.0-0", +} + +build = { + type = "make", + build_variables = { + CFLAGS="$(CFLAGS)", + LIBFLAG="$(LIBFLAG)", + LUA_LIBDIR="$(LUA_LIBDIR)", + LUA_BINDIR="$(LUA_BINDIR)", + LUA_INCDIR="$(LUA_INCDIR)", + LUA="$(LUA)", + }, + install_variables = { + INST_PREFIX="$(PREFIX)", + INST_BINDIR="$(BINDIR)", + INST_LIBDIR="$(LIBDIR)", + INST_LUADIR="$(LUADIR)", + INST_CONFDIR="$(CONFDIR)", + }, +} diff --git a/rockspec/apisix-1.4-0.rockspec b/rockspec/apisix-1.4-0.rockspec new file 
mode 100644 index 0000000000000..f9bb44d0ef270 --- /dev/null +++ b/rockspec/apisix-1.4-0.rockspec @@ -0,0 +1,74 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +package = "apisix" +version = "1.4-0" +supported_platforms = {"linux", "macosx"} + +source = { + url = "git://github.com/apache/incubator-apisix", + tag = "1.4", +} + +description = { + summary = "Apache APISIX(incubating) is a cloud-native microservices API gateway, delivering the ultimate performance, security, open source and scalable platform for all your APIs and microservices.", + homepage = "https://github.com/apache/incubator-apisix", + license = "Apache License 2.0", +} + +dependencies = { + "lua-resty-template = 1.9", + "lua-resty-etcd = 1.0", + "lua-resty-balancer = 0.02rc5", + "lua-resty-ngxvar = 0.5", + "lua-resty-jit-uuid = 0.0.7", + "lua-resty-healthcheck-api7 = 2.2.0", + "lua-resty-jwt = 0.2.0", + "lua-resty-cookie = 0.1.0", + "lua-resty-session = 2.24", + "opentracing-openresty = 0.1", + "lua-resty-radixtree = 1.9", + "lua-protobuf = 0.3.1", + "lua-resty-openidc = 1.7.2-1", + "luafilesystem = 1.7.0-2", + "lua-tinyyaml = 0.1", + "lua-resty-prometheus = 1.1", + "jsonschema = 0.8", + "lua-resty-ipmatcher = 0.6", + "lua-resty-kafka = 0.07", 
+ "lua-resty-logger-socket = 2.0-0", + "skywalking-nginx-lua-plugin = 1.0-0", +} + +build = { + type = "make", + build_variables = { + CFLAGS="$(CFLAGS)", + LIBFLAG="$(LIBFLAG)", + LUA_LIBDIR="$(LUA_LIBDIR)", + LUA_BINDIR="$(LUA_BINDIR)", + LUA_INCDIR="$(LUA_INCDIR)", + LUA="$(LUA)", + }, + install_variables = { + INST_PREFIX="$(PREFIX)", + INST_BINDIR="$(BINDIR)", + INST_LIBDIR="$(LIBDIR)", + INST_LUADIR="$(LUADIR)", + INST_CONFDIR="$(CONFDIR)", + }, +} diff --git a/rockspec/apisix-master-0.rockspec b/rockspec/apisix-master-0.rockspec index f52af72a14c61..ef09a66e197e7 100644 --- a/rockspec/apisix-master-0.rockspec +++ b/rockspec/apisix-master-0.rockspec @@ -32,7 +32,7 @@ description = { dependencies = { "lua-resty-template = 1.9", - "lua-resty-etcd = 0.9", + "lua-resty-etcd = 1.0", "lua-resty-balancer = 0.02rc5", "lua-resty-ngxvar = 0.5", "lua-resty-jit-uuid = 0.0.7", @@ -41,15 +41,17 @@ dependencies = { "lua-resty-cookie = 0.1.0", "lua-resty-session = 2.24", "opentracing-openresty = 0.1", - "lua-resty-radixtree = 1.8", + "lua-resty-radixtree = 2.0", "lua-protobuf = 0.3.1", "lua-resty-openidc = 1.7.2-1", "luafilesystem = 1.7.0-2", "lua-tinyyaml = 0.1", - "lua-resty-prometheus = 1.0", + "lua-resty-prometheus = 1.1", "jsonschema = 0.8", "lua-resty-ipmatcher = 0.6", "lua-resty-kafka = 0.07", + "lua-resty-logger-socket = 2.0-0", + "skywalking-nginx-lua-plugin = 1.0-0", } build = { diff --git a/t/APISIX.pm b/t/APISIX.pm index b8aa664bf9419..0b93fb28064fd 100644 --- a/t/APISIX.pm +++ b/t/APISIX.pm @@ -72,11 +72,21 @@ if ($enable_local_dns) { my $yaml_config = read_file("conf/config.yaml"); my $ssl_crt = read_file("conf/cert/apisix.crt"); my $ssl_key = read_file("conf/cert/apisix.key"); +my $test2_crt = read_file("conf/cert/test2.crt"); +my $test2_key = read_file("conf/cert/test2.key"); $yaml_config =~ s/node_listen: 9080/node_listen: 1984/; $yaml_config =~ s/enable_heartbeat: true/enable_heartbeat: false/; $yaml_config =~ s/ # stream_proxy:/ stream_proxy:\n tcp:\n - 
9100/; $yaml_config =~ s/admin_key:/disable_admin_key:/; +my $etcd_enable_auth = $ENV{"ETCD_ENABLE_AUTH"} || "false"; + +if ($etcd_enable_auth eq "true") { + $yaml_config =~ s/ # user:/ user:/; + $yaml_config =~ s/ # password:/ password:/; +} + + my $profile = $ENV{"APISIX_PROFILE"}; @@ -100,7 +110,7 @@ add_block_preprocessor(sub { my $main_config = $block->main_config // <<_EOC_; worker_rlimit_core 500M; -working_directory $apisix_home; +env ENABLE_ETCD_AUTH; env APISIX_PROFILE; _EOC_ @@ -199,6 +209,7 @@ _EOC_ lua_shared_dict upstream-healthcheck 32m; lua_shared_dict worker-events 10m; lua_shared_dict lrucache-lock 10m; + lua_shared_dict skywalking-tracing-buffer 100m; resolver $dns_addrs_str; resolver_timeout 5; @@ -417,6 +428,10 @@ $user_yaml_config $ssl_crt >>> ../conf/cert/apisix.key $ssl_key +>>> ../conf/cert/test2.crt +$test2_crt +>>> ../conf/cert/test2.key +$test2_key $user_apisix_yaml _EOC_ diff --git a/t/admin/balancer.t b/t/admin/balancer.t index 0be70dbd47bff..1afcedafa214a 100644 --- a/t/admin/balancer.t +++ b/t/admin/balancer.t @@ -26,17 +26,20 @@ add_block_preprocessor(sub { my $init_by_lua_block = <<_EOC_; require "resty.core" apisix = require("apisix") + core = require("apisix.core") apisix.http_init() function test(route, ctx, count) local balancer = require("apisix.balancer") local res = {} for i = 1, count or 12 do - local host, port, err = balancer.pick_server(route, ctx) + local server, err = balancer.pick_server(route, ctx) if err then ngx.say("failed: ", err) end - res[host] = (res[host] or 0) + 1 + + core.log.warn("host: ", server.host, " port: ", server.port) + res[server.host] = (res[server.host] or 0) + 1 end local keys = {} @@ -61,20 +64,18 @@ __DATA__ --- config location /t { content_by_lua_block { - local route = { - value = { - upstream = { - nodes = { - ["39.97.63.215:80"] = 1, - ["39.97.63.216:81"] = 1, - ["39.97.63.217:82"] = 1, - }, - type = "roundrobin", - }, - id = 1 - } + local up_conf = { + type = "roundrobin", + nodes = { + 
{host = "39.97.63.215", port = 80, weight = 1}, + {host = "39.97.63.216", port = 81, weight = 1}, + {host = "39.97.63.217", port = 82, weight = 1}, } + } local ctx = {conf_version = 1} + ctx.upstream_conf = up_conf + ctx.upstream_version = "ver" + ctx.upstream_key = up_conf.type .. "#route_" .. "id" test(route, ctx) } @@ -94,23 +95,18 @@ host: 39.97.63.217 count: 4 --- config location /t { content_by_lua_block { - local core = require("apisix.core") - local balancer = require("apisix.balancer") - - local route = { - value = { - upstream = { - nodes = { - ["39.97.63.215:80"] = 1, - ["39.97.63.216:81"] = 2, - ["39.97.63.217:82"] = 3, - }, - type = "roundrobin", - }, - id = 1 - } + local up_conf = { + type = "roundrobin", + nodes = { + {host = "39.97.63.215", port = 80, weight = 1}, + {host = "39.97.63.216", port = 81, weight = 2}, + {host = "39.97.63.217", port = 82, weight = 3}, } + } local ctx = {conf_version = 1} + ctx.upstream_conf = up_conf + ctx.upstream_version = "ver" + ctx.upstream_key = up_conf.type .. "#route_" .. "id" test(route, ctx) } @@ -130,33 +126,30 @@ host: 39.97.63.217 count: 6 --- config location /t { content_by_lua_block { - local balancer = require("apisix.balancer") - - local route = { - value = { - upstream = { - nodes = { - ["39.97.63.215:80"] = 1, - ["39.97.63.216:81"] = 1, - ["39.97.63.217:82"] = 1, - }, - type = "roundrobin", - }, - id = 1 - } + local up_conf = { + type = "roundrobin", + nodes = { + {host = "39.97.63.215", port = 80, weight = 1}, + {host = "39.97.63.216", port = 81, weight = 1}, + {host = "39.97.63.217", port = 82, weight = 1}, } - local ctx = {conf_version = 1} + } + local ctx = {} + ctx.upstream_conf = up_conf + ctx.upstream_version = 1 + ctx.upstream_key = up_conf.type .. "#route_" .. 
"id" test(route, ctx) -- cached by version - route.value.upstream.nodes = { - ["39.97.63.218:83"] = 1, + up_conf.nodes = { + {host = "39.97.63.218", port = 80, weight = 1}, + {host = "39.97.63.219", port = 80, weight = 0}, } test(route, ctx) -- update, version changed - ctx = {conf_version = 2} + ctx.upstream_version = 2 test(route, ctx) } } @@ -179,37 +172,33 @@ host: 39.97.63.218 count: 12 --- config location /t { content_by_lua_block { - local route = { - value = { - upstream = { - nodes = { - ["39.97.63.215:80"] = 1, - ["39.97.63.216:81"] = 1, - ["39.97.63.217:82"] = 1, - }, - type = "chash", - key = "remote_addr", - }, - id = 1 - } + local up_conf = { + type = "chash", + key = "remote_addr", + nodes = { + {host = "39.97.63.215", port = 80, weight = 1}, + {host = "39.97.63.216", port = 81, weight = 1}, + {host = "39.97.63.217", port = 82, weight = 1}, } + } local ctx = { - conf_version = 1, - var = { - remote_addr = "127.0.0.1" - } + var = {remote_addr = "127.0.0.1"}, } + ctx.upstream_conf = up_conf + ctx.upstream_version = 1 + ctx.upstream_key = up_conf.type .. "#route_" .. "id" test(route, ctx) -- cached by version - route.value.upstream.nodes = { - ["39.97.63.218:83"] = 1, + up_conf.nodes = { + {host = "39.97.63.218", port = 80, weight = 1}, + {host = "39.97.63.219", port = 80, weight = 0}, } test(route, ctx) -- update, version changed - ctx.conf_version = 2 + ctx.upstream_version = 2 test(route, ctx) } } @@ -221,3 +210,41 @@ host: 39.97.63.215 count: 12 host: 39.97.63.218 count: 12 --- no_error_log [error] + + + +=== TEST 5: return item directly if only have one item in `nodes` +--- config + location /t { + content_by_lua_block { + local up_conf = { + type = "roundrobin", + nodes = { + {host = "39.97.63.215", port = 80, weight = 1}, + {host = "39.97.63.216", port = 81, weight = 1}, + {host = "39.97.63.217", port = 82, weight = 1}, + } + } + local ctx = {} + ctx.upstream_conf = up_conf + ctx.upstream_version = 1 + ctx.upstream_key = up_conf.type .. 
"#route_" .. "id" + + test(route, ctx) + + -- one item in nodes, return it directly + up_conf.nodes = { + {host = "39.97.63.218", port = 80, weight = 1}, + } + test(route, ctx) + } + } +--- request +GET /t +--- response_body +host: 39.97.63.215 count: 4 +host: 39.97.63.216 count: 4 +host: 39.97.63.217 count: 4 +host: 39.97.63.218 count: 12 +--- no_error_log +[error] diff --git a/t/admin/global-rules.t b/t/admin/global-rules.t index aa7a0c600ea1d..2cda952f205cd 100644 --- a/t/admin/global-rules.t +++ b/t/admin/global-rules.t @@ -164,16 +164,17 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/global_rules/1/plugins', + local code, body = t('/apisix/admin/global_rules/1', ngx.HTTP_PATCH, [[{ + "plugins": { "limit-count": { "count": 3, "time_window": 60, "rejected_code": 503, "key": "remote_addr" } - }]], + }}]], [[{ "node": { "value": { @@ -308,3 +309,59 @@ GET /t {"error_msg":"invalid configuration: property \"plugins\" is required"} --- no_error_log [error] + + + +=== TEST 9: string id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/a-b-c-ABC_0123', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 10: string id(DELETE) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/a-b-c-ABC_0123', + ngx.HTTP_DELETE + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] diff --git a/t/admin/plugins.t b/t/admin/plugins.t index 
11939872ff4c0..784b5d23cef76 100644 --- a/t/admin/plugins.t +++ b/t/admin/plugins.t @@ -30,7 +30,7 @@ __DATA__ --- request GET /apisix/admin/plugins/list --- response_body_like eval -qr/\["limit-req","limit-count","limit-conn","key-auth","basic-auth","prometheus","node-status","jwt-auth","zipkin","ip-restriction","grpc-transcode","serverless-pre-function","serverless-post-function","openid-connect","proxy-rewrite","redirect","response-rewrite","fault-injection","udp-logger","wolf-rbac","proxy-cache","tcp-logger","proxy-mirror","kafka-logger","cors","batch-requests"\]/ +qr/\["fault-injection","serverless-pre-function","batch-requests","cors","ip-restriction","uri-blocker","openid-connect","wolf-rbac","basic-auth","jwt-auth","key-auth","consumer-restriction","authz-keycloak","proxy-mirror","proxy-cache","proxy-rewrite","limit-conn","limit-count","limit-req","node-status","redirect","response-rewrite","grpc-transcode","prometheus","echo","http-logger","tcp-logger","kafka-logger","syslog","udp-logger","zipkin","skywalking","serverless-post-function"\]/ --- no_error_log [error] @@ -51,7 +51,7 @@ GET /apisix/admin/plugins --- request GET /apisix/admin/plugins/limit-req --- response_body -{"properties":{"rate":{"minimum":0,"type":"number"},"burst":{"minimum":0,"type":"number"},"key":{"enum":["remote_addr","server_addr","http_x_real_ip","http_x_forwarded_for"],"type":"string"},"rejected_code":{"minimum":200,"type":"integer"}},"required":["rate","burst","key","rejected_code"],"type":"object"} +{"properties":{"rate":{"minimum":0,"type":"number"},"burst":{"minimum":0,"type":"number"},"key":{"enum":["remote_addr","server_addr","http_x_real_ip","http_x_forwarded_for"],"type":"string"},"rejected_code":{"type":"integer","default":503,"minimum":200}},"required":["rate","burst","key"],"type":"object"} --- no_error_log [error] diff --git a/t/admin/routes-array-nodes.t b/t/admin/routes-array-nodes.t new file mode 100644 index 0000000000000..c9b141883a28d --- /dev/null +++ 
b/t/admin/routes-array-nodes.t @@ -0,0 +1,125 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +run_tests; + +__DATA__ + +=== TEST 1: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin" + }, + "desc": "new route", + "uri": "/index.html" + }]], + [[{ + "node": { + "value": { + "methods": [ + "GET" + ], + "uri": "/index.html", + "desc": "new route", + "upstream": { + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin" + } + }, + "key": "/apisix/routes/1" + }, + "action": "set" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 2: get route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_GET, + nil, + [[{ 
+ "node": { + "value": { + "methods": [ + "GET" + ], + "uri": "/index.html", + "desc": "new route", + "upstream": { + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin" + } + }, + "key": "/apisix/routes/1" + }, + "action": "get" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] diff --git a/t/admin/routes.t b/t/admin/routes.t index 78ab5c62fb6bb..0e0d198892a74 100644 --- a/t/admin/routes.t +++ b/t/admin/routes.t @@ -993,9 +993,11 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/routes/1/methods', + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PATCH, - '["GET"]', + [[{ + "methods": ["GET", null, null, null, null, null, null, null, null] + }]], [[{ "node": { "value": { @@ -1027,9 +1029,11 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/routes/1/uri', + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PATCH, - '"/patch_test"', + [[{ + "uri": "/patch_test" + }]], [[{ "node": { "value": { @@ -1054,23 +1058,22 @@ passed -=== TEST 30: patch route(whole) +=== TEST 30: patch route(multi) --- config location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/routes/1/', + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PATCH, [[{ "methods": ["GET"], "upstream": { "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "127.0.0.1:8080": null, + "127.0.0.2:8080": 1 + } }, - "desc": "new route", - "uri": "/index.html" + "desc": "new route" }]], [[{ "node": { @@ -1078,11 +1081,11 @@ passed "methods": [ "GET" ], - "uri": "/index.html", + "uri": "/patch_test", "desc": "new route", "upstream": { "nodes": { - "127.0.0.1:8080": 1 + "127.0.0.2:8080": 1 }, "type": "roundrobin" } @@ -1727,3 +1730,158 @@ GET /t passed --- 
no_error_log [error] + + + +=== TEST 47: set route(id: 1 + name: test name) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "name": "test name", + "uri": "/index.html" + }]], + [[{ + "node": { + "value": { + "name": "test name" + }, + "key": "/apisix/routes/1" + }, + "action": "set" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 48: string id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/a-b-c-ABC_0123', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "uri": "/index.html" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 49: string id(delete) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/a-b-c-ABC_0123', + ngx.HTTP_DELETE + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 50: invalid string id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/*invalid', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "uri": "/index.html" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- no_error_log +[error] + + + +=== TEST 51: Verify Response 
Content-Type=applciation/json +--- config + location /t { + content_by_lua_block { + local http = require("resty.http") + local httpc = http.new() + httpc:set_timeout(500) + httpc:connect(ngx.var.server_addr, ngx.var.server_port) + local res, err = httpc:request( + { + path = '/apisix/admin/routes/1?ttl=1', + method = "GET", + } + ) + + ngx.header["Content-Type"] = res.headers["Content-Type"] + ngx.status = 200 + ngx.say("passed") + } + } +--- request +GET /t +--- response_headers +Content-Type: application/json diff --git a/t/admin/schema.t b/t/admin/schema.t index 54ef58ee7ee77..d98b491d79a53 100644 --- a/t/admin/schema.t +++ b/t/admin/schema.t @@ -93,9 +93,23 @@ location /t { sni = { type = "string", pattern = [[^\*?[0-9a-zA-Z-.]+$]], - } + }, + snis = { + type = "array", + items = { + type = "string", + pattern = [[^\*?[0-9a-zA-Z-.]+$]], + } + }, + exptime = { + type = "integer", + minimum = 1588262400, -- 2020/5/1 0:0:0 + }, + }, + oneOf = { + {required = {"sni", "key", "cert"}}, + {required = {"snis", "key", "cert"}} }, - required = {"sni", "key", "cert"}, additionalProperties = false, } ) @@ -117,7 +131,7 @@ passed --- request GET /apisix/admin/schema/plugins/limit-count --- response_body eval -qr/"required":\["count","time_window","key","rejected_code"]/ +qr/"required":\["count","time_window","key"\]/ --- no_error_log [error] diff --git a/t/admin/services-array-nodes.t b/t/admin/services-array-nodes.t new file mode 100644 index 0000000000000..7ca2c6cb5f8a0 --- /dev/null +++ b/t/admin/services-array-nodes.t @@ -0,0 +1,115 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +run_tests; + +__DATA__ + +=== TEST 1: set service(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin" + }, + "desc": "new service" + }]], + [[{ + "node": { + "value": { + "upstream": { + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin" + }, + "desc": "new service" + }, + "key": "/apisix/services/1" + }, + "action": "set" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 2: get service(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_GET, + nil, + [[{ + "node": { + "value": { + "upstream": { + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin" + }, + "desc": "new service" + }, + "key": "/apisix/services/1" + }, + "action": "get" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] diff --git a/t/admin/services-string-id.t b/t/admin/services-string-id.t new file mode 100644 index 0000000000000..63ffce13c4825 --- /dev/null +++ 
b/t/admin/services-string-id.t @@ -0,0 +1,884 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +run_tests; + +__DATA__ + +=== TEST 1: set service(id: 5eeb3dc90f747328b2930b0b) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new service" + }]], + [[{ + "node": { + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new service" + }, + "key": "/apisix/services/5eeb3dc90f747328b2930b0b" + }, + "action": "set" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 2: get service(id: 5eeb3dc90f747328b2930b0b) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', + ngx.HTTP_GET, + nil, + [[{ + "node": { + "value": { + 
"upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new service" + }, + "key": "/apisix/services/5eeb3dc90f747328b2930b0b" + }, + "action": "get" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 3: delete service(id: 5eeb3dc90f747328b2930b0b) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', + ngx.HTTP_DELETE, + nil, + [[{ + "action": "delete" + }]] + ) + ngx.say("[delete] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- response_body +[delete] code: 200 message: passed +--- no_error_log +[error] + + + +=== TEST 4: delete service(id: not_found) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code = t('/apisix/admin/services/not_found', + ngx.HTTP_DELETE, + nil, + [[{ + "action": "delete" + }]] + ) + + ngx.say("[delete] code: ", code) + } + } +--- request +GET /t +--- response_body +[delete] code: 404 +--- no_error_log +[error] + + + +=== TEST 5: post service + delete +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/services', + ngx.HTTP_POST, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + }]], + [[{ + "node": { + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + } + }, + "action": "create" + }]] + ) + + if code ~= 200 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say("[push] code: ", code, " message: ", message) + + local id = string.sub(res.node.key, #"/apisix/services/" + 1) + code, message = t('/apisix/admin/services/' .. 
id, + ngx.HTTP_DELETE, + nil, + [[{ + "action": "delete" + }]] + ) + ngx.say("[delete] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- response_body +[push] code: 200 message: passed +[delete] code: 200 message: passed +--- no_error_log +[error] + + + +=== TEST 6: uri + upstream +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + }]], + [[{ + "node": { + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + } + }, + "action": "set" + }]] + ) + + if code ~= 200 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say("[push] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- response_body +[push] code: 200 message: passed +--- no_error_log +[error] + + + +=== TEST 7: uri + plugins +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }]], + [[{ + "node": { + "value": { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + } + }, + "action": "set" + }]] + ) + + if code ~= 200 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say("[push] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- response_body +[push] code: 200 message: passed +--- no_error_log +[error] + + + +=== TEST 8: invalid empty plugins (todo) + location /t { + content_by_lua_block { + local core = require("apisix.core") + 
local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', + ngx.HTTP_PUT, + [[{ + "plugins": {} + }]] + ) + + if code ~= 200 then + ngx.status = code + ngx.print(message) + return + end + + ngx.say("[push] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- error_code: 400 +--- SKIP + + + +=== TEST 9: invalid service id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/*invalid_id$', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }]] + ) + + ngx.exit(code) + } + } +--- request +GET /t +--- error_code: 400 +--- no_error_log +[error] + + + +=== TEST 10: invalid id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', + ngx.HTTP_PUT, + [[{ + "id": "3", + "plugins": {} + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"wrong service id"} +--- no_error_log +[error] + + + +=== TEST 11: id in the rule +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services', + ngx.HTTP_PUT, + [[{ + "id": "5eeb3dc90f747328b2930b0b", + "plugins": {} + }]], + [[{ + "node": { + "value": { + "plugins": {} + }, + "key": "/apisix/services/5eeb3dc90f747328b2930b0b" + }, + "action": "set" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 12: integer id less than 1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services', + ngx.HTTP_PUT, + [[{ + "id": -100, + 
"plugins": {} + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"id\" validation failed: object matches none of the requireds"} +--- no_error_log +[error] + + + +=== TEST 13: invalid service id: contains symbols value +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services', + ngx.HTTP_PUT, + [[{ + "id": "*invalid_id$", + "plugins": {} + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"id\" validation failed: object matches none of the requireds"} +--- no_error_log +[error] + + + +=== TEST 14: no additional properties is valid +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services', + ngx.HTTP_PUT, + [[{ + "id": "5eeb3dc90f747328b2930b0b", + "invalid_property": "/index.html" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: additional properties forbidden, found invalid_property"} +--- no_error_log +[error] + + + +=== TEST 15: invalid upstream_id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services', + ngx.HTTP_PUT, + [[{ + "id": "5eeb3dc90f747328b2930b0b", + "upstream_id": "invalid$" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"upstream_id\" validation failed: object matches none of the requireds"} +--- no_error_log +[error] + + + +=== TEST 16: not exist upstream_id +--- config + location /t { + content_by_lua_block { + local t = 
require("lib.test_admin").test + local code, body = t('/apisix/admin/services', + ngx.HTTP_PUT, + [[{ + "id": "5eeb3dc90f747328b2930b0b", + "upstream_id": "9999999999" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"failed to fetch upstream info by upstream id [9999999999], response code: 404"} +--- no_error_log +[error] + + + +=== TEST 17: wrong service id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', + ngx.HTTP_POST, + [[{ + "plugins": {} + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"wrong service id, do not need it"} +--- no_error_log +[error] + + + +=== TEST 18: wrong service id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services', + ngx.HTTP_POST, + [[{ + "id": "5eeb3dc90f747328b2930b0b", + "plugins": {} + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"wrong service id, do not need it"} +--- no_error_log +[error] + + + +=== TEST 19: patch service(whole) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', + ngx.HTTP_PATCH, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new 20 service" + }]], + [[{ + "node": { + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new 20 service" + }, + "key": "/apisix/services/5eeb3dc90f747328b2930b0b" + }, + "action": "set" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- 
no_error_log +[error] + + + +=== TEST 20: patch service(new desc) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', + ngx.HTTP_PATCH, + [[{ + "desc": "new 19 service" + }]], + [[{ + "node": { + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new 19 service" + }, + "key": "/apisix/services/5eeb3dc90f747328b2930b0b" + }, + "action": "set" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 21: patch service(new nodes) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', + ngx.HTTP_PATCH, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8081": 3, + "127.0.0.1:8082": 4 + }, + "type": "roundrobin" + } + }]], + [[{ + "node": { + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1, + "127.0.0.1:8081": 3, + "127.0.0.1:8082": 4 + }, + "type": "roundrobin" + } + } + } + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 22: set service(id: 5eeb3dc90f747328b2930b0b) and upstream(type:chash, default hash_on: vars, missing key) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "chash" + }, + "desc": "new service" + }]]) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"missing key"} +--- no_error_log +[error] + + + +=== TEST 23: set service(id: 5eeb3dc90f747328b2930b0b) and upstream(type:chash, hash_on: header, 
missing key) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "chash", + "hash_on": "header" + }, + "desc": "new service" + }]]) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"missing key"} +--- no_error_log +[error] + + + +=== TEST 24: set service(id: 5eeb3dc90f747328b2930b0b) and upstream(type:chash, hash_on: cookie, missing key) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "chash", + "hash_on": "cookie" + }, + "desc": "new service" + }]]) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"missing key"} +--- no_error_log +[error] + + + +=== TEST 25: set service(id: 5eeb3dc90f747328b2930b0b) and upstream(type:chash, hash_on: consumer, missing key is ok) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "chash", + "hash_on": "consumer" + }, + "desc": "new service" + }]]) + + ngx.status = code + ngx.say(code .. " " .. 
body) + } + } +--- request +GET /t +--- response_body +200 passed +--- no_error_log +[error] diff --git a/t/admin/services.t b/t/admin/services.t index 6c6f5037a69ff..546e2ce03b558 100644 --- a/t/admin/services.t +++ b/t/admin/services.t @@ -633,7 +633,7 @@ GET /t location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/services/1/', + local code, body = t('/apisix/admin/services/1', ngx.HTTP_PATCH, [[{ "upstream": { @@ -679,9 +679,11 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/services/1/desc', + local code, body = t('/apisix/admin/services/1', ngx.HTTP_PATCH, - '"new 19 service"', + [[{ + "desc": "new 19 service" + }]], [[{ "node": { "value": { @@ -717,20 +719,23 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/services/1/upstream', + local code, body = t('/apisix/admin/services/1', ngx.HTTP_PATCH, [[{ - "nodes": { - "127.0.0.1:8081": 3, - "127.0.0.1:8082": 4 - }, - "type": "roundrobin" + "upstream": { + "nodes": { + "127.0.0.1:8081": 3, + "127.0.0.1:8082": 4 + }, + "type": "roundrobin" + } }]], [[{ "node": { "value": { "upstream": { "nodes": { + "127.0.0.1:8080": 1, "127.0.0.1:8081": 3, "127.0.0.1:8082": 4 }, @@ -877,3 +882,80 @@ GET /t 200 passed --- no_error_log [error] + + + +=== TEST 26: set service(id: 1 + test service name) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "name": "test service name" + }]], + [[{ + "node": { + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "name": "test service name" + }, + "key": "/apisix/services/1" + }, + "action": "set" + }]] + ) + + ngx.status = code + 
ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 27: invalid string id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/*invalid', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- no_error_log +[error] diff --git a/t/admin/ssl.t b/t/admin/ssl.t index 15bfb0ae4301c..e93ca1971174b 100644 --- a/t/admin/ssl.t +++ b/t/admin/ssl.t @@ -228,7 +228,7 @@ GET /t GET /t --- error_code: 400 --- response_body -{"error_msg":"invalid configuration: property \"cert\" is required"} +{"error_msg":"invalid configuration: value should match only one schema, but matches none"} --- no_error_log [error] @@ -269,3 +269,175 @@ GET /t passed --- no_error_log [error] + + + +=== TEST 8: store sni in `snis` +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("conf/cert/apisix.crt") + local ssl_key = t.read_file("conf/cert/apisix.key") + local data = { + cert = ssl_cert, key = ssl_key, + snis = {"*.foo.com", "bar.com"}, + } + + local code, body = t.test('/apisix/admin/ssl/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "node": { + "value": { + "snis": ["*.foo.com", "bar.com"] + }, + "key": "/apisix/ssl/1" + }, + "action": "set" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 9: store exptime +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("conf/cert/apisix.crt") + local ssl_key = t.read_file("conf/cert/apisix.key") + local data = { + 
cert = ssl_cert, key = ssl_key, + sni = "bar.com", + exptime = 1588262400 + 60 * 60 * 24 * 365, + } + + local code, body = t.test('/apisix/admin/ssl/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "node": { + "value": { + "sni": "bar.com", + "exptime": 1619798400 + }, + "key": "/apisix/ssl/1" + }, + "action": "set" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 10: string id +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("conf/cert/apisix.crt") + local ssl_key = t.read_file("conf/cert/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} + + local code, body = t.test('/apisix/admin/ssl/a-b-c-ABC_0123', + ngx.HTTP_PUT, + core.json.encode(data) + ) + if code > 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 11: string id(delete) +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("conf/cert/apisix.crt") + local ssl_key = t.read_file("conf/cert/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} + + local code, body = t.test('/apisix/admin/ssl/a-b-c-ABC_0123', + ngx.HTTP_DELETE + ) + if code > 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 12: invalid id +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("conf/cert/apisix.crt") + local ssl_key = t.read_file("conf/cert/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} + + local code, body = 
t.test('/apisix/admin/ssl/*invalid', + ngx.HTTP_PUT, + core.json.encode(data) + ) + if code > 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- no_error_log +[error] diff --git a/t/admin/stream-routes.t b/t/admin/stream-routes.t index 24b5e5ef0331d..5bd36ff33613d 100644 --- a/t/admin/stream-routes.t +++ b/t/admin/stream-routes.t @@ -297,3 +297,89 @@ GET /t [delete] code: 200 message: passed --- no_error_log [error] + + + +=== TEST 8: string id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/a-b-c-ABC_0123', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 9: string id(delete) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/a-b-c-ABC_0123', + ngx.HTTP_DELETE + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 10: invalid string id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/*invalid', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- no_error_log +[error] diff --git a/t/admin/upstream-array-nodes.t b/t/admin/upstream-array-nodes.t new file mode 100644 index 0000000000000..9f0c5b8d99780 --- /dev/null +++ b/t/admin/upstream-array-nodes.t 
@@ -0,0 +1,409 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +run_tests; + +__DATA__ + +=== TEST 1: set upstream(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin", + "desc": "new upstream" + }]], + [[{ + "node": { + "value": { + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin", + "desc": "new upstream" + }, + "key": "/apisix/upstreams/1" + }, + "action": "set" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 2: get upstream(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_GET, + nil, + [[{ + "node": { + "value": { + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin", + "desc": "new upstream" 
+ }, + "key": "/apisix/upstreams/1" + }, + "action": "get" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 3: delete upstream(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/upstreams/1', + ngx.HTTP_DELETE, + nil, + [[{ + "action": "delete" + }]] + ) + ngx.say("[delete] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- response_body +[delete] code: 200 message: passed +--- no_error_log +[error] + + + +=== TEST 4: delete upstream(id: not_found) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code = t('/apisix/admin/upstreams/not_found', + ngx.HTTP_DELETE, + nil, + [[{ + "action": "delete" + }]] + ) + + ngx.say("[delete] code: ", code) + } + } +--- request +GET /t +--- response_body +[delete] code: 404 +--- no_error_log +[error] + + + +=== TEST 5: push upstream + delete +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/upstreams', + ngx.HTTP_POST, + [[{ + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin" + }]], + [[{ + "node": { + "value": { + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin" + } + }, + "action": "create" + }]] + ) + + if code ~= 200 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say("[push] code: ", code, " message: ", message) + + local id = string.sub(res.node.key, #"/apisix/upstreams/" + 1) + code, message = t('/apisix/admin/upstreams/' .. 
id, + ngx.HTTP_DELETE, + nil, + [[{ + "action": "delete" + }]] + ) + ngx.say("[delete] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- response_body +[push] code: 200 message: passed +[delete] code: 200 message: passed +--- no_error_log +[error] + + + +=== TEST 6: empty nodes +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": [], + "type": "roundrobin" + }]] + ) + + if code ~= 200 then + ngx.status = code + ngx.print(message) + return + end + + ngx.say("[push] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"nodes\" validation failed: object matches none of the requireds"} + + + +=== TEST 7: no additional properties is valid +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams', + ngx.HTTP_PUT, + [[{ + "id": 1, + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin", + "invalid_property": "/index.html" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: additional properties forbidden, found invalid_property"} +--- no_error_log +[error] + + + +=== TEST 8: invalid weight of node +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams', + ngx.HTTP_PUT, + [[{ + "id": 1, + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": "1" + }], + "type": "chash" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"nodes\" validation 
failed: object matches none of the requireds"} +--- no_error_log +[error] + + + +=== TEST 9: invalid weight of node +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams', + ngx.HTTP_PUT, + [[{ + "id": 1, + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": -100 + }], + "type": "chash" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"nodes\" validation failed: object matches none of the requireds"} +--- no_error_log +[error] + + + +=== TEST 10: invalid port of node +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams', + ngx.HTTP_PUT, + [[{ + "id": 1, + "nodes": [{ + "host": "127.0.0.1", + "port": 0, + "weight": 1 + }], + "type": "chash" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"nodes\" validation failed: object matches none of the requireds"} +--- no_error_log +[error] + + + +=== TEST 11: invalid host of node +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams', + ngx.HTTP_PUT, + [[{ + "id": 1, + "nodes": [{ + "host": "127.#.%.1", + "port": 8080, + "weight": 1 + }], + "type": "chash" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"nodes\" validation failed: object matches none of the requireds"} +--- no_error_log +[error] diff --git a/t/admin/upstream.t b/t/admin/upstream.t index da131348a43c1..d1167ab80418b 100644 --- a/t/admin/upstream.t +++ b/t/admin/upstream.t @@ -235,7 +235,7 @@ GET /t GET /t --- error_code: 400 --- 
response_body -{"error_msg":"invalid configuration: property \"nodes\" validation failed: expect object to have at least 1 properties"} +{"error_msg":"invalid configuration: property \"nodes\" validation failed: object matches none of the requireds"} @@ -523,7 +523,7 @@ GET /t GET /t --- error_code: 400 --- response_body -{"error_msg":"invalid configuration: property \"nodes\" validation failed: failed to validate 127.0.0.1:8080 (matching \".*\"): wrong type: expected integer, got string"} +{"error_msg":"invalid configuration: property \"nodes\" validation failed: object matches none of the requireds"} --- no_error_log [error] @@ -553,7 +553,7 @@ GET /t GET /t --- error_code: 400 --- response_body -{"error_msg":"invalid configuration: property \"nodes\" validation failed: failed to validate 127.0.0.1:8080 (matching \".*\"): expected -100 to be greater than 0"} +{"error_msg":"invalid configuration: property \"nodes\" validation failed: object matches none of the requireds"} --- no_error_log [error] @@ -652,7 +652,7 @@ GET /t location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/upstreams/1/', + local code, body = t('/apisix/admin/upstreams/1', ngx.HTTP_PATCH, [[{ "nodes": { @@ -694,9 +694,11 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/upstreams/1/desc', + local code, body = t('/apisix/admin/upstreams/1', ngx.HTTP_PATCH, - '"new 21 upstream"', + [[{ + "desc": "new 21 upstream" + }]], [[{ "node": { "value": { @@ -730,16 +732,19 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/upstreams/1/nodes', + local code, body = t('/apisix/admin/upstreams/1', ngx.HTTP_PATCH, [[{ - "127.0.0.1:8081": 3, - "127.0.0.1:8082": 4 + "nodes": { + "127.0.0.1:8081": 3, + "127.0.0.1:8082": 4 + } }]], [[{ "node": { "value": { "nodes": { + "127.0.0.1:8080": 1, "127.0.0.1:8081": 
3, "127.0.0.1:8082": 4 }, @@ -768,18 +773,20 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/upstreams/1/nodes', + local code, body = t('/apisix/admin/upstreams/1', ngx.HTTP_PATCH, [[{ - "127.0.0.1:8081": 0, - "127.0.0.1:8082": 4 + "nodes": { + "127.0.0.1:8081": 3, + "127.0.0.1:8082": 0 + } }]], [[{ "node": { "value": { "nodes": { - "127.0.0.1:8081": 0, - "127.0.0.1:8082": 4 + "127.0.0.1:8081": 3, + "127.0.0.1:8082": 0 }, "type": "roundrobin", "desc": "new 21 upstream" @@ -1140,7 +1147,7 @@ GET /t -=== TEST 35: type chash, hash_on: consumer, don't need upstream key +=== TEST 35: type chash, hash_on: consumer, do not need upstream key --- config location /t { content_by_lua_block { @@ -1230,3 +1237,127 @@ GET /t {"error_msg":"invalid configuration: property \"hash_on\" validation failed: matches non of the enum values"} --- no_error_log [error] + + + +=== TEST 38: set upstream(id: 1 + name: test name) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin", + "name": "test upstream name" + }]], + [[{ + "node": { + "value": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin", + "name": "test upstream name" + }, + "key": "/apisix/upstreams/1" + }, + "action": "set" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 39: string id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/a-b-c-ABC_0123', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- 
no_error_log +[error] + + + +=== TEST 40: string id(delete) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/a-b-c-ABC_0123', + ngx.HTTP_DELETE + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 41: invalid string id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/*invalid', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"id\" validation failed: object matches none of the requireds"} +--- no_error_log +[error] diff --git a/t/apisix.luacov b/t/apisix.luacov index f9792d8953091..0694c2bffa1af 100644 --- a/t/apisix.luacov +++ b/t/apisix.luacov @@ -28,6 +28,7 @@ return { ["apisix/plugins/prometheus/*"] = "plugins/prometheus", ["apisix/plugins/zipkin/*"] = "plugins/zipkin", ["apisix/utils/*"] = "utils", + ["apisix/discovery/*"] = "discovery", -- can not enable both at http and stream, will fix it later. -- ["apisix/stream/*"] = "stream", diff --git a/t/core/etcd-auth-fail.t b/t/core/etcd-auth-fail.t new file mode 100644 index 0000000000000..dfeaffee178fc --- /dev/null +++ b/t/core/etcd-auth-fail.t @@ -0,0 +1,56 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +BEGIN { + $ENV{"ETCD_ENABLE_AUTH"} = "false" +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("info"); + +# Authentication is enabled at etcd and credentials are set +system('etcdctl --endpoints="http://127.0.0.1:2379" -u root:5tHkHhYkjr6cQY user add root:5tHkHhYkjr6cQY'); +system('etcdctl --endpoints="http://127.0.0.1:2379" -u root:5tHkHhYkjr6cQY auth enable'); +system('etcdctl --endpoints="http://127.0.0.1:2379" -u root:5tHkHhYkjr6cQY role revoke --path "/*" -rw guest'); + +run_tests; + +# Authentication is disabled at etcd & guest access is granted +system('etcdctl --endpoints="http://127.0.0.1:2379" -u root:5tHkHhYkjr6cQY auth disable'); +system('etcdctl --endpoints="http://127.0.0.1:2379" -u root:5tHkHhYkjr6cQY role grant --path "/*" -rw guest'); + + +__DATA__ + +=== TEST 1: Set and Get a value pass +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local key = "/test_key" + local val = "test_value" + local res, err = core.etcd.set(key, val) + ngx.say(err) + } + } +--- request +GET /t +--- response_body +insufficient credentials code: 401 diff --git a/t/core/etcd-auth.t b/t/core/etcd-auth.t new file mode 100644 index 0000000000000..3051a68ffbde1 --- /dev/null +++ b/t/core/etcd-auth.t @@ -0,0 +1,59 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +BEGIN { + $ENV{"ETCD_ENABLE_AUTH"} = "true" +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("info"); + +# Authentication is enabled at etcd and credentials are set +system('etcdctl --endpoints="http://127.0.0.1:2379" -u root:5tHkHhYkjr6cQY user add root:5tHkHhYkjr6cQY'); +system('etcdctl --endpoints="http://127.0.0.1:2379" -u root:5tHkHhYkjr6cQY auth enable'); +system('etcdctl --endpoints="http://127.0.0.1:2379" -u root:5tHkHhYkjr6cQY role revoke --path "/*" -rw guest'); + +run_tests; + +# Authentication is disabled at etcd & guest access is granted +system('etcdctl --endpoints="http://127.0.0.1:2379" -u root:5tHkHhYkjr6cQY auth disable'); +system('etcdctl --endpoints="http://127.0.0.1:2379" -u root:5tHkHhYkjr6cQY role grant --path "/*" -rw guest'); + +__DATA__ + +=== TEST 1: Set and Get a value pass with authentication +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local key = "/test_key" + local val = "test_value" + core.etcd.set(key, val) + local res, err = core.etcd.get(key) + ngx.say(res.body.node.value) + core.etcd.delete(val) + } + } +--- request +GET /t +--- response_body +test_value +--- no_error_log +[error] diff --git a/t/core/lrucache.t b/t/core/lrucache.t index 5a9aefe08d7c6..83b930869672c 100644 --- a/t/core/lrucache.t +++ b/t/core/lrucache.t @@ -258,7 +258,7 @@ obj: 
{"idx":2,"_cache_ver":"ver"} end local lru_get = core.lrucache.new({ - ttl = 0.1, count = 256, invalid_stale = true, + ttl = 1, count = 256, invalid_stale = true, }) local function f() diff --git a/t/debug/debug-mode.t b/t/debug/debug-mode.t index dcd66e50d4ad3..63e3141b43f34 100644 --- a/t/debug/debug-mode.t +++ b/t/debug/debug-mode.t @@ -60,11 +60,14 @@ loaded plugin and sort by priority: 10000 name: serverless-pre-function loaded plugin and sort by priority: 4010 name: batch-requests loaded plugin and sort by priority: 4000 name: cors loaded plugin and sort by priority: 3000 name: ip-restriction +loaded plugin and sort by priority: 2900 name: uri-blocker loaded plugin and sort by priority: 2599 name: openid-connect loaded plugin and sort by priority: 2555 name: wolf-rbac loaded plugin and sort by priority: 2520 name: basic-auth loaded plugin and sort by priority: 2510 name: jwt-auth loaded plugin and sort by priority: 2500 name: key-auth +loaded plugin and sort by priority: 2400 name: consumer-restriction +loaded plugin and sort by priority: 2000 name: authz-keycloak loaded plugin and sort by priority: 1010 name: proxy-mirror loaded plugin and sort by priority: 1009 name: proxy-cache loaded plugin and sort by priority: 1008 name: proxy-rewrite @@ -76,16 +79,19 @@ loaded plugin and sort by priority: 900 name: redirect loaded plugin and sort by priority: 899 name: response-rewrite loaded plugin and sort by priority: 506 name: grpc-transcode loaded plugin and sort by priority: 500 name: prometheus +loaded plugin and sort by priority: 412 name: echo +loaded plugin and sort by priority: 410 name: http-logger loaded plugin and sort by priority: 405 name: tcp-logger loaded plugin and sort by priority: 403 name: kafka-logger +loaded plugin and sort by priority: 401 name: syslog loaded plugin and sort by priority: 400 name: udp-logger loaded plugin and sort by priority: 0 name: example-plugin loaded plugin and sort by priority: -1000 name: zipkin +loaded plugin and sort 
by priority: -1100 name: skywalking loaded plugin and sort by priority: -2000 name: serverless-post-function - === TEST 2: set route(no plugin) --- config location /t { diff --git a/t/discovery/eureka.t b/t/discovery/eureka.t new file mode 100644 index 0000000000000..724cc7dc55895 --- /dev/null +++ b/t/discovery/eureka.t @@ -0,0 +1,131 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +sub read_file($) { + my $infile = shift; + open my $in, $infile + or die "cannot open $infile for reading: $!"; + my $cert = do { local $/; <$in> }; + close $in; + $cert; +} + +our $yaml_config = read_file("conf/config.yaml"); +$yaml_config =~ s/node_listen: 9080/node_listen: 1984/; +$yaml_config =~ s/enable_heartbeat: true/enable_heartbeat: false/; +$yaml_config =~ s/config_center: etcd/config_center: yaml/; +$yaml_config =~ s/enable_admin: true/enable_admin: false/; +$yaml_config =~ s/enable_admin: true/enable_admin: false/; +$yaml_config =~ s/ discovery:/ discovery: eureka #/; +$yaml_config =~ s/# discovery:/ discovery: eureka #/; +$yaml_config =~ s/error_log_level: "warn"/error_log_level: "info"/; + + +$yaml_config .= <<_EOC_; +eureka: + host: + - "http://127.0.0.1:8761" + prefix: "/eureka/" + fetch_interval: 10 + weight: 80 + timeout: + connect: 1500 + send: 1500 + read: 1500 +_EOC_ + +run_tests(); + +__DATA__ + +=== TEST 1: get APISIX-EUREKA info from EUREKA +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /eureka/* + upstream: + service_name: APISIX-EUREKA + type: roundrobin + +#END +--- request +GET /eureka/apps/APISIX-EUREKA +--- response_body_like +.*APISIX-EUREKA.* +--- error_log +use config_center: yaml +default_weight:80. +fetch_interval:10. +eureka uri:http://127.0.0.1:8761/eureka/. +connect_timeout:1500, send_timeout:1500, read_timeout:1500. 
+--- no_error_log +[error] + + + +=== TEST 2: error service_name name +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /eureka/* + upstream: + service_name: APISIX-EUREKA-DEMO + type: roundrobin + +#END +--- request +GET /eureka/apps/APISIX-EUREKA +--- error_code: 502 +--- error_log eval +qr/.*failed to pick server: no valid upstream node.*/ + + + +=== TEST 3: with proxy-rewrite +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /eureka-test/* + plugins: + proxy-rewrite: + regex_uri: ["^/eureka-test/(.*)", "/${1}"] + upstream: + service_name: APISIX-EUREKA + type: roundrobin + +#END +--- request +GET /eureka-test/eureka/apps/APISIX-EUREKA +--- response_body_like +.*APISIX-EUREKA.* +--- error_log +use config_center: yaml +default_weight:80. +fetch_interval:10. +eureka uri:http://127.0.0.1:8761/eureka/. +connect_timeout:1500, send_timeout:1500, read_timeout:1500. +--- no_error_log +[error] diff --git a/t/lib/server.lua b/t/lib/server.lua index 0f8fbe35d006e..e34602c302340 100644 --- a/t/lib/server.lua +++ b/t/lib/server.lua @@ -28,7 +28,6 @@ function _M.hello1() ngx.say("hello1 world") end - function _M.server_port() ngx.print(ngx.var.server_port) end @@ -90,7 +89,6 @@ function _M.opentracing() ngx.say("opentracing") end - function _M.with_header() ngx.header['Content-Type'] = 'application/xml' ngx.header['X-Server-id'] = 100 @@ -100,6 +98,28 @@ function _M.with_header() ngx.say("!") end +function _M.mock_skywalking_v2_service_register() + ngx.say('[{"key":"APISIX","value":1}]') +end + +function _M.mock_skywalking_v2_instance_register() + ngx.req.read_body() + local data = ngx.req.get_body_data() + data = json_decode(data) + local key = data['instances'][1]['instanceUUID'] + local ret = {} + ret[1] = {key = key, value = 1} + ngx.say(json_encode(ret)) +end + +function _M.mock_skywalking_v2_instance_heartbeat() + ngx.say('skywalking heartbeat ok') +end + +function _M.mock_skywalking_v2_segments() + ngx.say('skywalking 
segments ok') +end + function _M.mock_zipkin() ngx.req.read_body() local data = ngx.req.get_body_data() diff --git a/t/lib/test_admin.lua b/t/lib/test_admin.lua index 8b4d6c53f5fb7..dc245c3bb7b72 100644 --- a/t/lib/test_admin.lua +++ b/t/lib/test_admin.lua @@ -14,9 +14,12 @@ -- See the License for the specific language governing permissions and -- limitations under the License. -- -local http = require("resty.http") -local json = require("cjson.safe") -local dir_names = {} +local http = require("resty.http") +local json = require("cjson.safe") +local aes = require "resty.aes" +local ngx_encode_base64 = ngx.encode_base64 +local str_find = string.find +local dir_names = {} local _M = {} @@ -101,6 +104,8 @@ end function _M.comp_tab(left_tab, right_tab) + dir_names = {} + if type(left_tab) == "string" then left_tab = json.decode(left_tab) end @@ -110,14 +115,21 @@ function _M.comp_tab(left_tab, right_tab) local ok, err = com_tab(left_tab, right_tab) if not ok then - return 500, "failed, " .. 
err + return false, err end return true end -function _M.test(uri, method, body, pattern) +function _M.test(uri, method, body, pattern, headers) + if not headers then + headers = {} + end + if not headers["Content-Type"] then + headers["Content-Type"] = "application/x-www-form-urlencoded" + end + if type(body) == "table" then body = json.encode(body) end @@ -139,9 +151,7 @@ function _M.test(uri, method, body, pattern) method = method, body = body, keepalive = false, - headers = { - ["Content-Type"] = "application/x-www-form-urlencoded", - }, + headers = headers, } ) if not res then @@ -203,4 +213,22 @@ function _M.req_self_with_http(uri, method, body, headers) end +function _M.aes_encrypt(origin) + local iv = "1234567890123456" + local aes_128_cbc_with_iv = assert(aes:new(iv, nil, aes.cipher(128, "cbc"), {iv=iv})) + + if aes_128_cbc_with_iv ~= nil and str_find(origin, "---") then + local encrypted = aes_128_cbc_with_iv:encrypt(origin) + if encrypted == nil then + core.log.error("failed to encrypt key[", origin, "] ") + return origin + end + + return ngx_encode_base64(encrypted) + end + + return origin +end + + return _M diff --git a/t/node/filter_func.t b/t/node/filter_func.t new file mode 100644 index 0000000000000..b016d13a537e1 --- /dev/null +++ b/t/node/filter_func.t @@ -0,0 +1,81 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: set route with filter_func +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "filter_func": "function(vars) + return vars['arg_a1'] == 'a1' and vars['arg_a2'] == 'a2' + end", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 2: hit route +--- request +GET /hello?a1=a1&a2=a2 +--- response_body +hello world +--- no_error_log +[error] + + + +=== TEST 3: miss route +--- request +GET /hello?a1=xxxx&a2=xxxx +--- error_code: 404 +--- response_body +{"error_msg":"failed to match any routes"} +--- no_error_log +[error] diff --git a/t/node/not-exist-upstream.t b/t/node/not-exist-upstream.t index a5a008d0543d2..73684ea9898b5 100644 --- a/t/node/not-exist-upstream.t +++ b/t/node/not-exist-upstream.t @@ -83,4 +83,4 @@ qr/502 Bad Gateway|500 Internal Server Error/ --- grep_error_log eval qr/\[error\].*/ --- grep_error_log_out eval -qr/failed to pick server: missing upstream configuration while connecting to upstream/ +qr/missing upstream configuration in Route or Service/ diff --git a/t/node/upstream-array-nodes.t b/t/node/upstream-array-nodes.t new file mode 100644 index 0000000000000..9e38da93bdc4a --- /dev/null +++ b/t/node/upstream-array-nodes.t @@ -0,0 +1,215 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: set upstream(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": [{ + "host": "127.0.0.1", + "port": 1980, + "weight": 1 + }], + "type": "roundrobin", + "desc": "new upstream" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 2: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream_id": "1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 3: hit routes +--- request +GET /hello +--- response_body +hello world +--- no_error_log +[error] + + + +=== TEST 4: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "nodes": [{ + "host": 
"127.0.0.1", + "port": 1980, + "weight": 1 + }], + "type": "roundrobin", + "desc": "new upstream" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 5: hit routes +--- request +GET /hello +--- response_body +hello world +--- no_error_log +[error] + + + +=== TEST 6: set services(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": [{ + "host": "127.0.0.1", + "port": 1980, + "weight": 1 + }], + "type": "roundrobin", + "desc": "new upstream" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 7: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "service_id": 1 + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 8: hit routes +--- request +GET /hello +--- response_body +hello world +--- no_error_log +[error] diff --git a/t/node/vars.t b/t/node/vars.t index fdbe22ea66c55..a51af1f6916c9 100644 --- a/t/node/vars.t +++ b/t/node/vars.t @@ -34,7 +34,7 @@ __DATA__ ngx.HTTP_PUT, [[{ "uri": "/hello", - "vars": [ ["arg_k", "v"] ], + "vars": [ ["arg_k", "==", "v"] ], "upstream": { "nodes": { "127.0.0.1:1980": 1 @@ -100,7 +100,7 @@ hello world ngx.HTTP_PUT, [=[{ "uri": "/hello", - "vars": [["cookie_k", "v"]], + "vars": [["cookie_k", "==", "v"]], "upstream": { "nodes": { "127.0.0.1:1980": 1 @@ -170,7 +170,7 @@ hello world ngx.HTTP_PUT, [=[{ "uri": "/hello", - "vars": [["http_k", "v"]], + "vars": [["http_k", 
"==", "v"]], "upstream": { "nodes": { "127.0.0.1:1980": 1 @@ -240,7 +240,7 @@ hello world ngx.HTTP_PUT, [=[{ "uri": "/hello", - "vars": [["http_k", "header"], ["cookie_k", "cookie"], ["arg_k", "uri_arg"]], + "vars": [["http_k", "==", "header"], ["cookie_k", "==", "cookie"], ["arg_k", "==", "uri_arg"]], "upstream": { "nodes": { "127.0.0.1:1980": 1 diff --git a/t/plugin/authz-keycloak.t b/t/plugin/authz-keycloak.t new file mode 100644 index 0000000000000..00ebc0ddb2706 --- /dev/null +++ b/t/plugin/authz-keycloak.t @@ -0,0 +1,353 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +log_level('debug'); +repeat_each(1); +no_long_string(); +no_root_location(); +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.authz-keycloak") + local ok, err = plugin.check_schema({ + token_endpoint = "https://efactory-security-portal.salzburgresearch.at/", + grant_type = "urn:ietf:params:oauth:grant-type:uma-ticket" + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- no_error_log +[error] + + + +=== TEST 2: full schema check +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.authz-keycloak") + local ok, err = plugin.check_schema({token_endpoint = "https://efactory-security-portal.salzburgresearch.at/", + permissions = {"res:customer#scopes:view"}, + timeout = 1000, + audience = "University", + grant_type = "urn:ietf:params:oauth:grant-type:uma-ticket" + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- no_error_log +[error] + + + +=== TEST 3: token_endpoint missing +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.authz-keycloak") + local ok, err = plugin.check_schema({permissions = {"res:customer#scopes:view"}}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +property "token_endpoint" is required +done +--- no_error_log +[error] + + + +=== TEST 4: add plugin with view course permissions +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "authz-keycloak": { + "token_endpoint": "http://127.0.0.1:8090/auth/realms/University/protocol/openid-connect/token", + "permissions": ["course_resource#view"], + "audience": "course_management", + 
"grant_type": "urn:ietf:params:oauth:grant-type:uma-ticket", + "timeout": 3000 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }]], + [[{ + "node": { + "value": { + "plugins": { + "authz-keycloak": { + "token_endpoint": "http://127.0.0.1:8090/auth/realms/University/protocol/openid-connect/token", + "permissions": ["course_resource#view"], + "audience": "course_management", + "grant_type": "urn:ietf:params:oauth:grant-type:uma-ticket", + "timeout": 3000 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }, + "key": "/apisix/routes/1" + }, + "action": "set" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 5: Get access token for teacher and access view course route +--- config + location /t { + content_by_lua_block { + local json_decode = require("cjson").decode + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:8090/auth/realms/University/protocol/openid-connect/token" + local res, err = httpc:request_uri(uri, { + method = "POST", + body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=teacher@gmail.com&password=123456", + headers = { + ["Content-Type"] = "application/x-www-form-urlencoded" + } + }) + + if res.status == 200 then + local body = json_decode(res.body) + local accessToken = body["access_token"] + + + uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello1" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer " .. 
accessToken, + } + }) + + if res.status == 200 then + ngx.say(true) + else + ngx.say(false) + end + else + ngx.say(false) + end + } + } +--- request +GET /t +--- response_body +true +--- no_error_log +[error] + + + +=== TEST 6: invalid access token +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello1" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer wrong_token", + } + }) + if res.status == 401 then + ngx.say(true) + end + } + } +--- request +GET /t +--- response_body +true +--- error_log +Invalid bearer token + + + +=== TEST 7: add plugin for delete course route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "authz-keycloak": { + "token_endpoint": "http://127.0.0.1:8090/auth/realms/University/protocol/openid-connect/token", + "permissions": ["course_resource#delete"], + "audience": "course_management", + "grant_type": "urn:ietf:params:oauth:grant-type:uma-ticket", + "timeout": 3000 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }]], + [[{ + "node": { + "value": { + "plugins": { + "authz-keycloak": { + "token_endpoint": "http://127.0.0.1:8090/auth/realms/University/protocol/openid-connect/token", + "permissions": ["course_resource#delete"], + "audience": "course_management", + "grant_type": "urn:ietf:params:oauth:grant-type:uma-ticket", + "timeout": 3000 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }, + "key": "/apisix/routes/1" + }, + "action": "set" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + 
+ +=== TEST 8: Get access token for student and delete course +--- config + location /t { + content_by_lua_block { + local json_decode = require("cjson").decode + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:8090/auth/realms/University/protocol/openid-connect/token" + local res, err = httpc:request_uri(uri, { + method = "POST", + body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=student@gmail.com&password=123456", + headers = { + ["Content-Type"] = "application/x-www-form-urlencoded" + } + }) + + if res.status == 200 then + local body = json_decode(res.body) + local accessToken = body["access_token"] + + + uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello1" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer " .. accessToken, + } + }) + + if res.status == 403 then + ngx.say(true) + else + ngx.say(false) + end + else + ngx.say(false) + end + } + } +--- request +GET /t +--- response_body +true +--- error_log +{"error":"access_denied","error_description":"not_authorized"} diff --git a/t/plugin/batch-requests.t b/t/plugin/batch-requests.t index 9c784a6bd481c..c409ceea4f46b 100644 --- a/t/plugin/batch-requests.t +++ b/t/plugin/batch-requests.t @@ -40,7 +40,8 @@ __DATA__ }, "headers": { "Base-Header": "base", - "Conflict-Header": "header_value" + "ConflictHeader": "header_value", + "OuterConflict": "common_value" }, "pipeline":[ { @@ -48,7 +49,7 @@ __DATA__ "headers": { "Header1": "hello", "Header2": "world", - "Conflict-Header": "b-header-value" + "ConflictHeader": "b-header-value" } },{ "path": "/c", @@ -71,7 +72,8 @@ __DATA__ "X-Res": "B", "X-Header1": "hello", "X-Header2": "world", - "X-Conflict-Header": "b-header-value" + "X-Conflict-Header": "b-header-value", + "X-OuterConflict": "common_value" } }, { @@ -95,8 +97,11 @@ __DATA__ "X-Query-Conflict": "d_value" } } - ]]=] - ) + ]]=], + { + 
ConflictHeader = "outer_header", + OuterConflict = "outer_confliect" + }) ngx.status = code ngx.say(body) @@ -110,7 +115,8 @@ __DATA__ ngx.header["Base-Query"] = ngx.var.arg_base ngx.header["X-Header1"] = ngx.req.get_headers()["Header1"] ngx.header["X-Header2"] = ngx.req.get_headers()["Header2"] - ngx.header["X-Conflict-Header"] = ngx.req.get_headers()["Conflict-Header"] + ngx.header["X-Conflict-Header"] = ngx.req.get_headers()["ConflictHeader"] + ngx.header["X-OuterConflict"] = ngx.req.get_headers()["OuterConflict"] ngx.header["X-Res"] = "B" ngx.print("B") } @@ -684,3 +690,91 @@ GET /aggregate passed --- no_error_log [error] + + + +=== TEST 15: copy all header to every request except Contenct- +--- config + client_body_in_file_only on; + location = /aggregate { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/batch-requests', + ngx.HTTP_POST, + [=[{ + "timeout": 1000, + "pipeline":[ + { + "path": "/b", + "headers": { + "Header1": "hello", + "Header2": "world" + } + },{ + "path": "/c", + "method": "PUT" + },{ + "path": "/d" + }] + }]=], + [=[[ + { + "status": 200, + "headers": { + "X-Cookie": "request-cookies-b", + "X-HeaderB": "request-header-b" + } + }, + { + "status": 201, + "headers": { + "X-Cookie": "request-cookies-c", + "X-HeaderC": "request-header-c" + } + }, + { + "status": 202, + "headers": { + "X-Cookie": "request-cookies-d", + "X-HeaderD": "request-header-d" + } + } + ]]=], + { + Cookie = "request-cookies", + OuterHeader = "request-header" + }) + + ngx.status = code + ngx.say(body) + } + } + + location = /b { + content_by_lua_block { + ngx.status = 200 + ngx.header["X-Cookie"] = ngx.req.get_headers()["Cookie"] .. "-b" + ngx.header["X-HeaderB"] = ngx.req.get_headers()["OuterHeader"] .. "-b" + } + } + location = /c { + content_by_lua_block { + ngx.status = 201 + ngx.header["X-Cookie"] = ngx.req.get_headers()["Cookie"] .. 
"-c" + ngx.header["X-HeaderC"] = ngx.req.get_headers()["OuterHeader"] .. "-c" + } + } + location = /d { + content_by_lua_block { + ngx.status = 202 + ngx.header["X-Cookie"] = ngx.req.get_headers()["Cookie"] .. "-d" + ngx.header["X-HeaderD"] = ngx.req.get_headers()["OuterHeader"] .. "-d" + } + } +--- request +GET /aggregate +--- response_body +passed +--- no_error_log +[error] diff --git a/t/plugin/consumer-restriction.t b/t/plugin/consumer-restriction.t new file mode 100644 index 0000000000000..57bbed3d58df1 --- /dev/null +++ b/t/plugin/consumer-restriction.t @@ -0,0 +1,542 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.consumer-restriction") + local conf = { + whitelist = { + "jack1", + "jack2" + } + } + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + end + + ngx.say(require("cjson").encode(conf)) + } + } +--- request +GET /t +--- response_body +{"whitelist":["jack1","jack2"]} +--- no_error_log +[error] + + + +=== TEST 2: whitelist and blacklist mutual exclusive +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.consumer-restriction") + local ok, err = plugin.check_schema({whitelist={"jack1"}, blacklist={"jack2"}}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +value should match only one schema, but matches both schemas 1 and 2 +done +--- no_error_log +[error] + + + +=== TEST 3: add consumer jack1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack1", + "plugins": { + "basic-auth": { + "username": "jack2019", + "password": "123456" + } + } + }]], + [[{ + "node": { + "value": { + "username": "jack1", + "plugins": { + "basic-auth": { + "username": "jack2019", + "password": "123456" + } + } + } + }, + "action": "set" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 4: add consumer jack2 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack2", + "plugins": { + "basic-auth": { + "username": "jack2020", + "password": "123456" + } + } + }]], + [[{ + 
"node": { + "value": { + "username": "jack2", + "plugins": { + "basic-auth": { + "username": "jack2020", + "password": "123456" + } + } + } + }, + "action": "set" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 5: set whitelist +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "basic-auth": {}, + "consumer-restriction": { + "whitelist": [ + "jack1" + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 6: verify unauthorized +--- request +GET /hello +--- error_code: 401 +--- response_body +{"message":"Missing authorization in request"} +--- no_error_log +[error] + + + +=== TEST 7: verify jack1 +--- request +GET /hello +--- more_headers +Authorization: Basic amFjazIwMTk6MTIzNDU2 +--- response_body +hello world +--- no_error_log +[error] + + + +=== TEST 8: verify jack2 +--- request +GET /hello +--- more_headers +Authorization: Basic amFjazIwMjA6MTIzNDU2 +--- error_code: 403 +--- response_body +{"message":"The consumer is not allowed"} +--- no_error_log +[error] + + + +=== TEST 9: set blacklist +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "basic-auth": {}, + "consumer-restriction": { + "blacklist": [ + "jack1" + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log 
+[error] + + + +=== TEST 10: verify unauthorized +--- request +GET /hello +--- error_code: 401 +--- response_body +{"message":"Missing authorization in request"} +--- no_error_log +[error] + + + +=== TEST 11: verify jack1 +--- request +GET /hello +--- more_headers +Authorization: Basic amFjazIwMTk6MTIzNDU2 +--- error_code: 403 +--- response_body +{"message":"The consumer is not allowed"} +--- no_error_log +[error] + + + +=== TEST 12: verify jack2 +--- request +GET /hello +--- more_headers +Authorization: Basic amFjazIwMjA6MTIzNDU2 +--- response_body +hello world +--- no_error_log +[error] + + + +=== TEST 13: set whitelist without authorization +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "consumer-restriction": { + "whitelist": [ + "jack1" + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 14: verify unauthorized +--- request +GET /hello +--- error_code: 401 +--- response_body +{"message":"Missing authentication or identity verification."} +--- no_error_log +[error] + + + +=== TEST 15: verify jack1 +--- request +GET /hello +--- more_headers +Authorization: Basic amFjazIwMTk6MTIzNDU2 +--- error_code: 401 +--- response_body +{"message":"Missing authentication or identity verification."} +--- no_error_log +[error] + + + +=== TEST 16: verify jack2 +--- request +GET /hello +--- more_headers +Authorization: Basic amFjazIwMjA6MTIzNDU2 +--- error_code: 401 +--- response_body +{"message":"Missing authentication or identity verification."} +--- no_error_log +[error] + + + +=== TEST 17: set blacklist without authorization +--- config + location /t { + content_by_lua_block { + local t = 
require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "consumer-restriction": { + "blacklist": [ + "jack1" + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 18: verify unauthorized +--- request +GET /hello +--- error_code: 401 +--- response_body +{"message":"Missing authentication or identity verification."} +--- no_error_log +[error] + + + +=== TEST 19: verify jack1 +--- request +GET /hello +--- more_headers +Authorization: Basic amFjazIwMTk6MTIzNDU2 +--- error_code: 401 +--- response_body +{"message":"Missing authentication or identity verification."} +--- no_error_log +[error] + + + +=== TEST 20: verify jack2 +--- request +GET /hello +--- more_headers +Authorization: Basic amFjazIwMjA6MTIzNDU2 +--- error_code: 401 +--- response_body +{"message":"Missing authentication or identity verification."} +--- no_error_log +[error] + + + +=== TEST 21: remove consumer-restriction +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 22: verify jack1 +--- request +GET /hello +--- more_headers +Authorization: Basic amFjazIwMTk6MTIzNDU2 +--- response_body +hello world +--- no_error_log +[error] + + + +=== TEST 23: verify jack2 +--- request +GET /hello +--- more_headers +Authorization: Basic amFjazIwMjA6MTIzNDU2 +--- response_body +hello world +--- no_error_log +[error] + + + 
+=== TEST 24: verify unauthorized +--- request +GET /hello +--- response_body +hello world +--- no_error_log +[error] diff --git a/t/plugin/cors.t b/t/plugin/cors.t index 392162f3e368e..364ae909df418 100644 --- a/t/plugin/cors.t +++ b/t/plugin/cors.t @@ -426,3 +426,68 @@ OPTIONS /hello HTTP/1.1 --- no_error_log [error] + + + +=== TEST 15: set route(auth plugins faills) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {}, + "cors": { + "allow_origins": "**", + "allow_methods": "**", + "allow_headers": "*", + "expose_headers": "*" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 16: auth failed still work +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: https://sub.domain.com +ExternalHeader1: val +ExternalHeader2: val +ExternalHeader3: val +--- response_body +{"message":"Missing API key found in request"} +--- error_code: 401 +--- response_headers +Access-Control-Allow-Origin: https://sub.domain.com +Access-Control-Allow-Methods: GET,POST,PUT,DELETE,PATCH,HEAD,OPTIONS,CONNECT,TRACE +Access-Control-Allow-Headers: * +Access-Control-Expose-Headers: * +Access-Control-Max-Age: 5 +Access-Control-Allow-Credentials: +--- no_error_log +[error] diff --git a/t/plugin/echo.t b/t/plugin/echo.t new file mode 100644 index 0000000000000..7040820f8bafe --- /dev/null +++ b/t/plugin/echo.t @@ -0,0 +1,469 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.echo") + local ok, err = plugin.check_schema({before_body = "body before", body = "body to attach" , + after_body = "body to attach"}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- no_error_log +[error] + + + +=== TEST 2: wrong type of integer +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.echo") + local ok, err = plugin.check_schema({before_body = "body before", body = "body to attach" , + after_body = 10}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +property "after_body" validation failed: wrong type: expected string, got number +done +--- no_error_log +[error] + + + +=== TEST 3: add plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "echo": { + "before_body": "before the body modification ", + "body":"hello upstream", + "headers": { + "Location":"https://www.iresty.com", + "Authorization": "userpass" + }, + "auth_value" : "userpass" + } + }, + "upstream": { + 
"nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]], + [[{ + "node": { + "value": { + "plugins": { + "echo": { + "before_body": "before the body modification ", + "body":"hello upstream", + "headers": { + "Location":"https://www.iresty.com" + }, + "auth_value" : "userpass" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }, + "key": "/apisix/routes/1" + }, + "action": "set" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 4: access +--- request +GET /hello +--- more_headers +Authorization: userpass +--- response_body chomp +before the body modification hello upstream +--- response_headers +Location: https://www.iresty.com +Authorization: userpass +--- no_error_log +[error] +--- wait: 0.2 + + + +=== TEST 5: update plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "echo": { + "before_body": "before the body modification ", + "auth_value" : "userpass", + "headers": { + "Location":"https://www.iresty.com" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]], + [[{ + "node": { + "value": { + "plugins": { + "echo": { + "before_body": "before the body modification ", + "auth_value" : "userpass", + "headers": { + "Location":"https://www.iresty.com" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }, + "key": "/apisix/routes/1" + }, + "action": "set" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 6: access without upstream body change +--- request +GET 
/hello +--- more_headers +Authorization: userpass +--- response_body +before the body modification hello world +--- response_headers +Location: https://www.iresty.com +--- wait: 0.2 +--- no_error_log +[error] +--- wait: 0.2 + + + +=== TEST 7: update plugin back +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "echo": { + "before_body": "before the body modification ", + "auth_value" : "userpasswrd", + "headers": { + "Location":"https://www.iresty.com" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]], + [[{ + "node": { + "value": { + "plugins": { + "echo": { + "before_body": "before the body modification ", + "auth_value" : "userpasswrd", + "headers": { + "Location":"https://www.iresty.com" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }, + "key": "/apisix/routes/1" + }, + "action": "set" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 8: access with wrong value in auth header value throws 401 +--- request +GET /hello +--- more_headers +Authorization: userpass +--- error_code: 401 +--- response_body chomp +before the body modification unauthorized body +--- response_headers +Location: https://www.iresty.com + + + +=== TEST 9: update plugin back +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "echo": { + "before_body": "before the body modification ", + "headers": { + "Location":"https://www.iresty.com" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]], + [[{ + 
"node": { + "value": { + "plugins": { + "echo": { + "before_body": "before the body modification ", + "headers": { + "Location":"https://www.iresty.com" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }, + "key": "/apisix/routes/1" + }, + "action": "set" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 10: access with no auth header and value throws 401 +--- request +GET /hello +--- more_headers +Authorization: userpass +--- error_code: 401 +--- response_body chomp +before the body modification unauthorized body +--- response_headers +Location: https://www.iresty.com + + + +=== TEST 11: update plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "echo": { + "before_body": "before the body modification ", + "auth_value" : "userpass", + "headers": { + "Location":"https://www.iresty.com" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]], + [[{ + "node": { + "value": { + "plugins": { + "echo": { + "before_body": "before the body modification ", + "auth_value" : "userpass", + "headers": { + "Location":"https://www.iresty.com" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }, + "key": "/apisix/routes/1" + }, + "action": "set" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 12: access without authorization as a header should throws 401 +--- request +GET /hello +--- error_code: 401 +--- response_body chomp +before the body modification unauthorized body +--- response_headers 
+Location: https://www.iresty.com diff --git a/t/plugin/grpc-transcode.t b/t/plugin/grpc-transcode.t index 92a760cf8529d..9baca82075959 100644 --- a/t/plugin/grpc-transcode.t +++ b/t/plugin/grpc-transcode.t @@ -81,7 +81,7 @@ passed local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, [[{ - "methods": ["GET"], + "methods": ["GET", "POST"], "uri": "/grpctest", "service_protocol": "grpc", "plugins": { @@ -125,7 +125,31 @@ qr/\{"message":"Hello world"\}/ -=== TEST 4: wrong service protocol +=== TEST 4: hit route by post +--- request +POST /grpctest +name=world +--- response_body eval +qr/\{"message":"Hello world"\}/ +--- no_error_log +[error] + + + +=== TEST 5: hit route by post json +--- request +POST /grpctest +{"name": "world"} +--- more_headers +Content-Type: application/json +--- response_body eval +qr/\{"message":"Hello world"\}/ +--- no_error_log +[error] + + + +=== TEST 6: wrong service protocol --- config location /t { content_by_lua_block { @@ -166,7 +190,7 @@ GET /t -=== TEST 5: wrong upstream address +=== TEST 7: wrong upstream address --- config location /t { content_by_lua_block { @@ -208,7 +232,7 @@ passed -=== TEST 6: hit route (Connection refused) +=== TEST 8: hit route (Connection refused) --- request GET /grpctest --- response_body eval @@ -219,7 +243,7 @@ Connection refused) while connecting to upstream -=== TEST 7: update proto(id: 1) +=== TEST 9: update proto(id: 1) --- config location /t { content_by_lua_block { @@ -266,7 +290,7 @@ passed -=== TEST 8: set routes(id: 2) +=== TEST 10: set routes(id: 2) --- config location /t { content_by_lua_block { @@ -309,7 +333,7 @@ passed -=== TEST 9: hit route +=== TEST 11: hit route --- request GET /grpc_plus?a=1&b=2 --- response_body eval @@ -319,7 +343,7 @@ qr/\{"result":3\}/ -=== TEST 10: hit route +=== TEST 12: hit route --- request GET /grpc_plus?a=1&b=2251799813685260 --- response_body eval @@ -329,7 +353,7 @@ qr/\{"result":"#2251799813685261"\}/ -=== TEST 11: set route3 deadline nodelay +=== 
TEST 13: set route3 deadline nodelay --- config location /t { content_by_lua_block { @@ -371,7 +395,7 @@ passed -=== TEST 12: hit route +=== TEST 14: hit route --- request GET /grpc_deadline?name=apisix --- response_body eval @@ -381,7 +405,7 @@ qr/\{"message":"Hello apisix"\}/ -=== TEST 13: set route4 deadline delay +=== TEST 15: set route4 deadline delay --- config location /t { content_by_lua_block { @@ -423,14 +447,14 @@ passed -=== TEST 14: hit route +=== TEST 16: hit route --- request GET /grpc_delay?name=apisix --- error_code: 504 -=== TEST 15: set routes: missing method +=== TEST 17: set routes: missing method --- config location /t { content_by_lua_block { diff --git a/t/plugin/http-logger.t b/t/plugin/http-logger.t new file mode 100644 index 0000000000000..029a85e17413e --- /dev/null +++ b/t/plugin/http-logger.t @@ -0,0 +1,597 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +log_level('debug'); +repeat_each(1); +no_long_string(); +no_root_location(); +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.http-logger") + local ok, err = plugin.check_schema({uri = "127.0.0.1"}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- no_error_log +[error] + + + +=== TEST 2: full schema check +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.http-logger") + local ok, err = plugin.check_schema({uri = "127.0.0.1", + auth_header = "Basic 123", + timeout = 3, + name = "http-logger", + max_retry_count = 2, + retry_delay = 2, + buffer_duration = 2, + inactive_timeout = 2, + batch_max_size = 500, + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- no_error_log +[error] + + + +=== TEST 3: uri is missing +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.http-logger") + local ok, err = plugin.check_schema({auth_header = "Basic 123", + timeout = 3, + name = "http-logger", + max_retry_count = 2, + retry_delay = 2, + buffer_duration = 2, + inactive_timeout = 2, + batch_max_size = 500, + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +property "uri" is required +done +--- no_error_log +[error] + + + +=== TEST 4: add plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:1982/hello", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, 
+ "uri": "/opentracing" + }]], + [[{ + "node": { + "value": { + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:1982/hello", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }, + "key": "/apisix/routes/1" + }, + "action": "set" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 5: access local server +--- request +GET /opentracing +--- response_body +opentracing +--- error_log +Batch Processor[http logger] successfully processed the entries +--- wait: 0.5 + + + +=== TEST 6: set to the http external endpoint +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:8888/hello-world-http", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]], + [[{ + "node": { + "value": { + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:8888/hello-world-http", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }, + "key": "/apisix/routes/1" + }, + "action": "set" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 7: access external endpoint +--- request +GET /hello +--- response_body +hello world +--- error_log +Batch 
Processor[http logger] successfully processed the entries +--- wait: 1.5 + + + +=== TEST 8: set wrong https endpoint +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "https://127.0.0.1:8888/hello-world-http", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }]], + [[{ + "node": { + "value": { + "plugins": { + "http-logger": { + "uri": "https://127.0.0.1:8888/hello-world-http", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }, + "key": "/apisix/routes/1" + }, + "action": "set" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 9: access wrong https endpoint +--- request +GET /hello1 +--- response_body +hello1 world +--- error_log +failed to perform SSL with host[127.0.0.1] port[8888] handshake failed +--- wait: 1.5 + + + +=== TEST 10: set correct https endpoint +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "https://127.0.0.1:9999/hello-world-http", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }]], + [[{ + "node": { + "value": { + "plugins": { + "http-logger": { + "uri": 
"https://127.0.0.1:9999/hello-world-http", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }, + "key": "/apisix/routes/1" + }, + "action": "set" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 11: access correct https endpoint +--- request +GET /hello1 +--- response_body +hello1 world +--- error_log +Batch Processor[http logger] successfully processed the entries +--- wait: 1.5 + + + +=== TEST 12: set batch max size to two +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "https://127.0.0.1:9999/hello-world-http", + "batch_max_size": 2, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }]], + [[{ + "node": { + "value": { + "plugins": { + "http-logger": { + "uri": "https://127.0.0.1:9999/hello-world-http", + "batch_max_size": 2, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }, + "key": "/apisix/routes/1" + }, + "action": "set" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 13: access route with batch max size twice +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. 
ngx.var.server_port .. "/hello1" + local res, err = httpc:request_uri(uri, { method = "GET"}) + res, err = httpc:request_uri(uri, { method = "GET"}) + ngx.status = res.status + if res.status == 200 then + ngx.say("hello1 world") + end + } + } +--- request +GET /t +--- response_body +hello1 world +--- error_log +Batch Processor[http logger] batch max size has exceeded +tranferring buffer entries to processing pipe line, buffercount[2] +Batch Processor[http logger] successfully processed the entries +--- wait: 1.5 + + + +=== TEST 14: set wrong port +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:9991/hello-world-http", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }]], + [[{ + "node": { + "value": { + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:9991/hello-world-http", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }, + "key": "/apisix/routes/1" + }, + "action": "set" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 15: access wrong port +--- request +GET /hello1 +--- response_body +hello1 world +--- error_log +Batch Processor[http logger] failed to process entries: failed to connect to host[127.0.0.1] port[9991] connection refused +--- wait: 1.5 diff --git a/t/plugin/limit-conn.t b/t/plugin/limit-conn.t index 3cfbc6d8f3db8..02f907f6a1ac2 100644 --- a/t/plugin/limit-conn.t +++ 
b/t/plugin/limit-conn.t @@ -797,3 +797,64 @@ GET /test_concurrency 503 --- error_log limit key: 10.10.10.2route + + + +=== TEST 20: default rejected_code +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 100, + "burst": 50, + "default_conn_delay": 0.1, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn" + }]], + [[{ + "node": { + "value": { + "plugins": { + "limit-conn": { + "rejected_code": 503 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn" + }, + "key": "/apisix/routes/1" + }, + "action": "set" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] diff --git a/t/plugin/limit-count.t b/t/plugin/limit-count.t index 988a99cf9a9cb..0e22937d029a8 100644 --- a/t/plugin/limit-count.t +++ b/t/plugin/limit-count.t @@ -585,3 +585,55 @@ passed [200, 200, 503, 503] --- no_error_log [error] + + + +=== TEST 20: default rejected_code +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]], + [[{ + "node": { + "value": { + "plugins": { + "limit-count": { + "rejected_code": 503 + } + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] diff --git a/t/plugin/limit-req.t b/t/plugin/limit-req.t index 
a9d4127bb8c74..001d975d684f3 100644 --- a/t/plugin/limit-req.t +++ b/t/plugin/limit-req.t @@ -379,3 +379,56 @@ GET /t passed --- no_error_log [error] + + + +=== TEST 11: default rejected_code +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-req": { + "rate": 4, + "burst": 2, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "desc": "上游节点", + "uri": "/hello" + }]], + [[{ + "node": { + "value": { + "plugins": { + "limit-req": { + "rejected_code": 503, + "key": "remote_addr" + } + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] diff --git a/t/plugin/prometheus.t b/t/plugin/prometheus.t index 078d4796691e3..4e99b84d4a95c 100644 --- a/t/plugin/prometheus.t +++ b/t/plugin/prometheus.t @@ -524,3 +524,142 @@ GET /apisix/prometheus/metrics qr/apisix_http_status\{code="404",route="3",service="",node="127.0.0.1"\} 2/ --- no_error_log [error] + + + +=== TEST 25: fetch the prometheus metric data with `overhead` +--- request +GET /apisix/prometheus/metrics +--- response_body eval +qr/.*apisix_http_overhead_bucket.*/ +--- no_error_log +[error] + + + +=== TEST 26: add service 3 to distinguish other services +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/3', + ngx.HTTP_PUT, + [[{ + "plugins": { + "prometheus": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1981": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 27: add a route 4 to redirect /sleep1 +--- config + location /t { + content_by_lua_block { + 
local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/4', + ngx.HTTP_PUT, + [[{ + "service_id": 3, + "uri": "/sleep1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 28: request from client to /sleep1 ( all hit) +--- pipelined_requests eval +["GET /sleep1", "GET /sleep1", "GET /sleep1"] +--- error_code eval +[200, 200, 200] +--- no_error_log +[error] + + + +=== TEST 29: fetch the prometheus metric data with `overhead`(the overhead < 1s) +--- request +GET /apisix/prometheus/metrics +--- response_body eval +qr/apisix_http_overhead_bucket.*service=\"3\".*le=\"00500.0.*/ +--- no_error_log +[error] + + + +=== TEST 30: delete route 4 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/4', + ngx.HTTP_DELETE + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 31: delete service 3 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/3', + ngx.HTTP_DELETE + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] diff --git a/t/plugin/redirect.t b/t/plugin/redirect.t index 0f413c5218127..1415db2da82d8 100644 --- a/t/plugin/redirect.t +++ b/t/plugin/redirect.t @@ -14,18 +14,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -BEGIN { - if ($ENV{TEST_NGINX_CHECK_LEAK}) { - $SkipReason = "unavailable for the hup tests"; - - } else { - $ENV{TEST_NGINX_USE_HUP} = 1; - undef $ENV{TEST_NGINX_USE_STAP}; - } -} - use t::APISIX 'no_plan'; +$ENV{TEST_NGINX_HTML_DIR} ||= html_dir(); + repeat_each(1); no_long_string(); no_shuffle(); @@ -346,3 +338,340 @@ Location: /hello//bar --- error_code: 301 --- no_error_log [error] + + + +=== TEST 15: http -> https redirect +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "host": "foo.com", + "vars": [ + [ + "scheme", + "==", + "http" + ] + ], + "plugins": { + "redirect": { + "uri": "https://$host$request_uri", + "ret_code": 301 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 16: redirect +--- request +GET /hello +--- more_headers +Host: foo.com +--- error_code: 301 +--- response_headers +Location: https://foo.com/hello + + + +=== TEST 17: enable http_to_https +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "host": "foo.com", + "plugins": { + "redirect": { + "http_to_https": true + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 18: redirect +--- request +GET /hello +--- more_headers +Host: foo.com +--- error_code: 301 +--- response_headers +Location: https://foo.com/hello + + + +=== TEST 19: enable http_to_https with ret_code(not take effect) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": 
"/hello", + "host": "foo.com", + "plugins": { + "redirect": { + "http_to_https": true, + "ret_code": 302 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 20: redirect +--- request +GET /hello +--- more_headers +Host: foo.com +--- error_code: 301 +--- response_headers +Location: https://foo.com/hello + + + +=== TEST 21: wrong configure, enable http_to_https with uri +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "host": "foo.com", + "plugins": { + "redirect": { + "http_to_https": true, + "uri": "/hello" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/error_msg":"failed to check the configuration of plugin redirect err: value should match only one schema, but matches both schemas 1 and 2/ +--- no_error_log +[error] + + + +=== TEST 22: enable http_to_https with upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "host": "test.com", + "plugins": { + "redirect": { + "http_to_https": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 23: redirect +--- request +GET /hello +--- more_headers +Host: test.com +--- error_code: 301 +--- response_headers +Location: https://test.com/hello + + + +=== TEST 24: set ssl(sni: test.com) +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = 
require("lib.test_admin") + + local ssl_cert = t.read_file("conf/cert/apisix.crt") + local ssl_key = t.read_file("conf/cert/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} + + local code, body = t.test('/apisix/admin/ssl/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "node": { + "value": { + "sni": "test.com" + }, + "key": "/apisix/ssl/1" + }, + "action": "set" + }]] + ) + + ngx.status = code + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 25: client https request +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + -- etcd sync + ngx.sleep(0.2) + + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + ngx.say("connected: ", ok) + + local sess, err = sock:sslhandshake(nil, "test.com", false) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + + ngx.say("ssl handshake: ", type(sess)) + + local req = "GET /hello HTTP/1.0\r\nHost: test.com\r\nConnection: close\r\n\r\n" + local bytes, err = sock:send(req) + if not bytes then + ngx.say("failed to send http request: ", err) + return + end + + ngx.say("sent http request: ", bytes, " bytes.") + + while true do + local line, err = sock:receive() + if not line then + -- ngx.say("failed to receive response status line: ", err) + break + end + + ngx.say("received: ", line) + end + + local ok, err = sock:close() + ngx.say("close: ", ok, " ", err) + end -- do + -- collectgarbage() + } +} +--- request +GET /t +--- response_body eval +qr{connected: 1 +ssl handshake: userdata +sent http request: 58 bytes. 
+received: HTTP/1.1 200 OK +received: Content-Type: text/plain +received: Connection: close +received: Server: \w+ +received: \nreceived: hello world +close: 1 nil} +--- no_error_log +[error] +[alert] diff --git a/t/plugin/serverless.t b/t/plugin/serverless.t index d6c691a3e0b0b..ad7828640dd13 100644 --- a/t/plugin/serverless.t +++ b/t/plugin/serverless.t @@ -568,3 +568,62 @@ passed GET /hello --- error_log serverless pre function:2 + + + +=== TEST 19: http -> https redirect +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "functions" : ["return function() if ngx.var.scheme == \"http\" and ngx.var.host == \"foo.com\" then ngx.header[\"Location\"] = \"https://foo.com\" .. ngx.var.request_uri; ngx.exit(ngx.HTTP_MOVED_PERMANENTLY); end; end"] + } + }, + "uri": "/hello" + }]], + [[{ + "node": { + "value": { + "plugins": { + "serverless-pre-function": { + "functions" : ["return function() if ngx.var.scheme == \"http\" and ngx.var.host == \"foo.com\" then ngx.header[\"Location\"] = \"https://foo.com\" .. ngx.var.request_uri; ngx.exit(ngx.HTTP_MOVED_PERMANENTLY); end; end"] + } + }, + "uri": "/hello" + }, + "key": "/apisix/routes/1" + }, + "action": "set" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- more_headers +Host: foo.com +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 20: check plugin +--- request +GET /hello +--- more_headers +Host: foo.com +--- error_code: 301 +--- response_headers +Location: https://foo.com/hello diff --git a/t/plugin/skywalking.t b/t/plugin/skywalking.t new file mode 100644 index 0000000000000..fef7b464031ef --- /dev/null +++ b/t/plugin/skywalking.t @@ -0,0 +1,328 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("debug"); +run_tests; + +__DATA__ + +=== TEST 1: add plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "skywalking": { + "endpoint": "http://127.0.0.1:1982/mock_skywalking", + "sample_ratio": 1, + "service_name": "APISIX" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]], + [[{ + "node": { + "value": { + "plugins": { + "skywalking": { + "endpoint": "http://127.0.0.1:1982/mock_skywalking", + "sample_ratio": 1, + "service_name":"APISIX" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }, + "key": "/apisix/routes/1" + }, + "action": "set" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 2: tiger skywalking +--- 
request +GET /opentracing +--- response_body +opentracing +--- no_error_log +[error] +--- grep_error_log eval +qr/skywalking service Instance registered, service instance id: \d+/ +--- grep_error_log_out eval +qr/skywalking service Instance registered, service instance id: 1/ + + + +=== TEST 3: test heartbeat +--- request +GET /opentracing +--- response_body +opentracing +--- no_error_log +[error] +--- error_log +skywalking heartbeat ok + + + +=== TEST 4: change sample ratio +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "skywalking": { + "endpoint": "http://127.0.0.1:1982/mock_skywalking", + "sample_ratio": 0.00001 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]], + [[{ + "node": { + "value": { + "plugins": { + "skywalking": { + "endpoint": "http://127.0.0.1:1982/mock_skywalking", + "sample_ratio": 0.00001 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }, + "key": "/apisix/routes/1" + }, + "action": "set" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 5: not tiger skywalking +--- request +GET /opentracing +--- response_body +opentracing +--- no_error_log +push data into skywalking context + + + +=== TEST 6: disabled +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]], + [[{ + "node": { + "value": { + "plugins": { + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": 
"/opentracing" + }, + "key": "/apisix/routes/1" + }, + "action": "set" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 7: not tiger skywalking +--- request +GET /opentracing +--- response_body +opentracing +--- no_error_log +rewrite phase of skywalking plugin + + + +=== TEST 8: enable skywalking +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "skywalking": { + "endpoint": "http://127.0.0.1:1982/mock_skywalking", + "sample_ratio": 1, + "service_name": "APISIX" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]], + [[{ + "node": { + "value": { + "plugins": { + "skywalking": { + "endpoint": "http://127.0.0.1:1982/mock_skywalking", + "sample_ratio": 1, + "service_name":"APISIX" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }, + "key": "/apisix/routes/1" + }, + "action": "set" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 9: test segments report +--- request +GET /opentracing +--- response_body +opentracing +--- no_error_log +[error] +--- error_log +skywalking segments reported diff --git a/t/plugin/syslog.t b/t/plugin/syslog.t new file mode 100644 index 0000000000000..05a5cd1dd80c0 --- /dev/null +++ b/t/plugin/syslog.t @@ -0,0 +1,267 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.syslog") + local ok, err = plugin.check_schema({ + host = "127.0.0.1", + port = 3000, + }) + if not ok then + ngx.say(err) + end + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- no_error_log +[error] + + + +=== TEST 2: missing port +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.syslog") + local ok, err = plugin.check_schema({host = "127.0.0.1"}) + if not ok then + ngx.say(err) + end + ngx.say("done") + } + } +--- request +GET /t +--- response_body +property "port" is required +done +--- no_error_log +[error] + + + +=== TEST 3: wrong type of string +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.syslog") + local ok, err = plugin.check_schema({ + host = "127.0.0.1", + port = "3000", + }) + if not ok then + ngx.say(err) + end + ngx.say("done") + } + } +--- request +GET /t +--- response_body +property "port" validation failed: wrong type: expected integer, got string +done +--- no_error_log +[error] + + + +=== TEST 4: add plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = 
t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "syslog": { + "host" : "127.0.0.1", + "port" : 5044 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]], + [[{ + "node": { + "value": { + "plugins": { + "syslog": { + "host" : "127.0.0.1", + "port" : 5044 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }, + "key": "/apisix/routes/1" + }, + "action": "set" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 5: access +--- request +GET /hello +--- response_body +hello world +--- no_error_log +[error] +--- wait: 0.2 + + + +=== TEST 6: flush manually +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.syslog") + local logger_socket = require "resty.logger.socket" + local logger, err = logger_socket:new({ + host = "127.0.0.1", + port = 5044, + flush_limit = 100, + }) + + local bytes, err = logger:log("abc") + if err then + ngx.log(ngx.ERR, err) + end + + local bytes, err = logger:log("efg") + if err then + ngx.log(ngx.ERR, err) + end + + local ok, err = plugin.flush_syslog(logger) + if not ok then + ngx.say(err) + end + ngx.say("done") + } + } +--- request +GET /t +--- no_error_log +[error] + + + +=== TEST 7: small flush_limit, instant flush +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "syslog": { + "host" : "127.0.0.1", + "port" : 5044, + "flush_limit" : 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]], + [[{ + "node": { + "value": { + "plugins": { + "syslog": { + "host" : "127.0.0.1", + "port" : 5044, + "flush_limit" : 1 + } + }, + "upstream": { + "nodes": { + 
"127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }, + "key": "/apisix/routes/1" + }, + "action": "set" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, {method = "GET"}) + } + } +--- request +GET /hello +hello world +--- no_error_log +[error] +--- wait: 0.2 diff --git a/t/plugin/uri-blocker.t b/t/plugin/uri-blocker.t new file mode 100644 index 0000000000000..3cf2e37bc12b2 --- /dev/null +++ b/t/plugin/uri-blocker.t @@ -0,0 +1,332 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(2); +no_long_string(); +no_root_location(); +no_shuffle(); + +run_tests; + +__DATA__ + +=== TEST 1: invalid regular expression +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "uri-blocker": { + "block_rules": [".+("] + } + }, + "uri": "/hello" + }]]) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } +} +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin uri-blocker err: pcre_compile() failed: missing ) in \".+(\""} +--- no_error_log +[error] + + + +=== TEST 2: multiple valid rules +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "uri-blocker": { + "block_rules": ["^a", "^b"] + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] +--- error_log +concat block_rules: ^a|^b, + + + +=== TEST 3: multiple rules(include one invalid rule) +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "uri-blocker": { + "block_rules": ["^a", "^b("] + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } +} +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin uri-blocker err: pcre_compile() failed: missing ) in \"^b(\""} +--- no_error_log +[error] + + + +=== TEST 4: sanity +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + 
ngx.HTTP_PUT, + [[{ + "plugins": { + "uri-blocker": { + "block_rules": ["aa"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]], + [[{ + "node": { + "value": { + "plugins": { + "uri-blocker": { + "block_rules": ["aa"] + } + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] +--- error_log +concat block_rules: aa, + + + +=== TEST 5: hit block rule +--- request +GET /hello?aa=1 +--- error_code: 403 +--- no_error_log +[error] + + + +=== TEST 6: miss block rule +--- request +GET /hello?bb=2 +--- no_error_log +[error] + + + +=== TEST 7: multiple block rules +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "uri-blocker": { + "block_rules": ["aa", "bb", "c\\d+"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] +--- error_log +concat block_rules: aa|bb|c\d+, + + + +=== TEST 8: hit block rule +--- request +GET /hello?x=bb +--- error_code: 403 +--- no_error_log +[error] + + + +=== TEST 9: hit block rule +--- request +GET /hello?bb=2 +--- error_code: 403 +--- no_error_log +[error] + + + +=== TEST 10: hit block rule +--- request +GET /hello?c1=2 +--- error_code: 403 +--- no_error_log +[error] + + + +=== TEST 11: not hit block rule +--- request +GET /hello?cc=2 +--- no_error_log +[error] + + + +=== TEST 12: SQL injection +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "uri-blocker": { + "block_rules": 
["select.+(from|limit)", "(?:(union(.*?)select))"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] +--- error_log +concat block_rules: select.+(from|limit)|(?:(union(.*?)select)), + + + +=== TEST 13: hit block rule +--- request +GET /hello?name=;select%20from%20sys +--- error_code: 403 +--- no_error_log +[error] + + + +=== TEST 14: hit block rule +--- request +GET /hello?name=;union%20select%20 +--- error_code: 403 +--- no_error_log +[error] + + + +=== TEST 15: not hit block rule +--- request +GET /hello?cc=2 +--- no_error_log +[error] diff --git a/t/router/radixtree-sni.t b/t/router/radixtree-sni.t index 8f1854509fd18..86724e04f2ced 100644 --- a/t/router/radixtree-sni.t +++ b/t/router/radixtree-sni.t @@ -362,7 +362,7 @@ passed -=== TEST 8: client request +=== TEST 8: client request: test.com --- config listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; @@ -434,3 +434,510 @@ lua ssl server name: "test.com" --- no_error_log [error] [alert] + + + +=== TEST 9: set ssl(sni: *.test2.com) +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("conf/cert/test2.crt") + local ssl_key = t.read_file("conf/cert/test2.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "*.test2.com"} + + local code, body = t.test('/apisix/admin/ssl/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "node": { + "value": { + "sni": "*.test2.com" + }, + "key": "/apisix/ssl/1" + }, + "action": "set" + }]] + ) + + ngx.status = code + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 10: client request: www.test2.com +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + 
content_by_lua_block { + -- etcd sync + ngx.sleep(0.2) + + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + ngx.say("connected: ", ok) + + local sess, err = sock:sslhandshake(nil, "www.test2.com", true) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + + ngx.say("ssl handshake: ", type(sess)) + end -- do + -- collectgarbage() + } +} +--- request +GET /t +--- response_body +connected: 1 +failed to do SSL handshake: 18: self signed certificate +--- error_log +lua ssl server name: "www.test2.com" +--- no_error_log +[error] +[alert] + + + +=== TEST 11: client request: aa.bb.test2.com +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + -- etcd sync + ngx.sleep(0.2) + + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + ngx.say("connected: ", ok) + + local sess, err = sock:sslhandshake(nil, "aa.bb.test2.com", true) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + + ngx.say("ssl handshake: ", type(sess)) + end -- do + -- collectgarbage() + } +} +--- request +GET /t +--- response_body +connected: 1 +failed to do SSL handshake: certificate host mismatch +--- error_log +lua ssl server name: "aa.bb.test2.com" +not found any valid sni configuration, matched sni: *.test2.com current sni: aa.bb.test2.com +--- no_error_log +[error] +[alert] + + + +=== TEST 12: disable ssl(sni: *.test2.com) +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local data = {status = 0} + + local code, body = t.test('/apisix/admin/ssl/1', + ngx.HTTP_PATCH, + core.json.encode(data), + [[{ + 
"node": { + "value": { + "status": 0 + }, + "key": "/apisix/ssl/1" + }, + "action": "set" + }]] + ) + + ngx.status = code + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 13: client request: www.test2.com -- failed by disable +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + -- etcd sync + ngx.sleep(0.2) + + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + ngx.say("connected: ", ok) + + local sess, err = sock:sslhandshake(nil, "www.test2.com", true) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + + ngx.say("ssl handshake: ", type(sess)) + end -- do + -- collectgarbage() + } +} +--- request +GET /t +--- response_body +connected: 1 +failed to do SSL handshake: certificate host mismatch +--- error_log +lua ssl server name: "www.test2.com" +--- no_error_log +[error] +[alert] + + + +=== TEST 14: enable ssl(sni: *.test2.com) +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local data = {status = 1} + + local code, body = t.test('/apisix/admin/ssl/1', + ngx.HTTP_PATCH, + core.json.encode(data), + [[{ + "node": { + "value": { + "status": 1 + }, + "key": "/apisix/ssl/1" + }, + "action": "set" + }]] + ) + + ngx.status = code + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 15: client request: www.test2.com again +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + -- etcd sync + ngx.sleep(0.2) + + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to 
connect: ", err) + return + end + + ngx.say("connected: ", ok) + + local sess, err = sock:sslhandshake(nil, "www.test2.com", true) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + + ngx.say("ssl handshake: ", type(sess)) + end -- do + -- collectgarbage() + } +} +--- request +GET /t +--- response_body +connected: 1 +failed to do SSL handshake: 18: self signed certificate +--- error_log +lua ssl server name: "www.test2.com" +--- no_error_log +[error] +[alert] + + + +=== TEST 16: set ssl(snis: {test2.com, *.test2.com}) +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("conf/cert/test2.crt") + local ssl_key = t.read_file("conf/cert/test2.key") + local data = {cert = ssl_cert, key = ssl_key, snis = {"test2.com", "*.test2.com"}} + + local code, body = t.test('/apisix/admin/ssl/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "node": { + "value": { + "snis": ["test2.com", "*.test2.com"] + }, + "key": "/apisix/ssl/1" + }, + "action": "set" + }]] + ) + + ngx.status = code + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 17: client request: test2.com +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + -- etcd sync + ngx.sleep(0.2) + + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + ngx.say("connected: ", ok) + + local sess, err = sock:sslhandshake(nil, "test2.com", true) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + + ngx.say("ssl handshake: ", type(sess)) + end -- do + -- collectgarbage() + } +} +--- request +GET /t +--- response_body +connected: 1 +failed to do SSL handshake: 18: self signed certificate +--- 
error_log +lua ssl server name: "test2.com" +--- no_error_log +[error] +[alert] + + + +=== TEST 18: client request: aa.bb.test2.com -- snis un-include +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + -- etcd sync + ngx.sleep(0.2) + + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + ngx.say("connected: ", ok) + + local sess, err = sock:sslhandshake(nil, "aa.bb.test2.com", true) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + + ngx.say("ssl handshake: ", type(sess)) + end -- do + -- collectgarbage() + } +} +--- request +GET /t +--- response_body +connected: 1 +failed to do SSL handshake: certificate host mismatch +--- error_log +lua ssl server name: "aa.bb.test2.com" +not found any valid sni configuration, matched sni: ["moc.2tset","moc.2tset.*"] current sni: aa.bb.test2.com +--- no_error_log +[error] +[alert] + + + +=== TEST 19: set ssl(encrypt ssl key with another iv) +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("conf/cert/test2.crt") + local ssl_key = t.aes_encrypt(t.read_file("conf/cert/test2.key")) + local data = {cert = ssl_cert, key = ssl_key, snis = {"test2.com", "*.test2.com"}} + + local code, body = t.test('/apisix/admin/ssl/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "node": { + "value": { + "snis": ["test2.com", "*.test2.com"] + }, + "key": "/apisix/ssl/1" + }, + "action": "set" + }]] + ) + + ngx.status = code + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 20: client request: test2.com +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + -- etcd sync + ngx.sleep(0.2) 
+ + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + ngx.say("connected: ", ok) + + local sess, err = sock:sslhandshake(nil, "test2.com", true) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + + ngx.say("ssl handshake: ", type(sess)) + end -- do + -- collectgarbage() + } +} +--- request +GET /t +--- response_body +connected: 1 +failed to do SSL handshake: handshake failed +--- error_log +decrypt ssl key failed. diff --git a/t/stream-plugin/mqtt-proxy.t b/t/stream-plugin/mqtt-proxy.t index 7556fcd491a47..82f5453ba329a 100644 --- a/t/stream-plugin/mqtt-proxy.t +++ b/t/stream-plugin/mqtt-proxy.t @@ -14,9 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # -BEGIN { - $ENV{TEST_NGINX_USE_HUP} = 1; -} use t::APISIX 'no_plan';