From 7fd430f2b88c7dbdac0b901ff76d8172f59bb87b Mon Sep 17 00:00:00 2001
From: gh-actions
Date: Tue, 19 Sep 2023 02:49:43 +0000
Subject: [PATCH] Deploy website - based on 35c6062c752cbcb728d8c43832852852211fa248

---
 404.html | 4 ++--
 assets/js/085ffbd7.4f5184c5.js | 1 +
 assets/js/085ffbd7.e238594b.js | 1 -
 assets/js/21bf2b11.774977b9.js | 1 -
 assets/js/21bf2b11.7a3f0c91.js | 1 +
 assets/js/46c09b1c.197dc2c9.js | 1 -
 assets/js/46c09b1c.333d8b8c.js | 1 +
 assets/js/51ede774.13d9575d.js | 1 -
 assets/js/51ede774.c006e06c.js | 1 +
 assets/js/7a460b64.01afc633.js | 1 -
 assets/js/7a460b64.d6831948.js | 1 +
 assets/js/872db8be.8ce2cc70.js | 1 -
 assets/js/872db8be.cbbddffb.js | 1 +
 assets/js/94f8b490.ab3ddf7d.js | 1 -
 assets/js/94f8b490.f1f2e0a7.js | 1 +
 assets/js/9f54a902.519e03cf.js | 1 -
 assets/js/9f54a902.795207ca.js | 1 +
 assets/js/c551111a.06a383c6.js | 1 +
 assets/js/c551111a.610c35d1.js | 1 -
 assets/js/ddd418a4.2dadf504.js | 1 +
 assets/js/ddd418a4.7dd57155.js | 1 -
 assets/js/f23cc535.516e7aad.js | 1 +
 assets/js/f23cc535.6e1ab86e.js | 1 -
 ...{runtime~main.658be35f.js => runtime~main.00b4f70d.js} | 2 +-
 community/discord/index.html | 4 ++--
 community/index.html | 4 ++--
 create/aws/analytics-using-aws/index.html | 4 ++--
 create/aws/bidding-on-aws/index.html | 4 ++--
 create/aws/chatapp/index.html | 4 ++--
 .../index.html | 4 ++--
 create/aws/index.html | 4 ++--
 create/aws/redis-on-aws/images/index.html | 4 ++--
 create/aws/redis-on-aws/index.html | 4 ++--
 create/aws/slackbot/index.html | 4 ++--
 create/aws/terraform/index.html | 4 ++--
 create/azure/index.html | 4 ++--
 create/azure/portal/index.html | 4 ++--
 create/azure/terraform-private-endpoint/index.html | 4 ++--
 create/azure/terraform-simple/index.html | 4 ++--
 create/azurefunctions/index.html | 4 ++--
 create/cloud/aws/index.html | 4 ++--
 create/cloud/azure/index.html | 4 ++--
 create/cloud/gcp/index.html | 4 ++--
 create/cloud/images/index.html | 4 ++--
 create/cloud/index.html | 4 ++--
 create/cloud/rediscloud/images/index.html | 4 ++--
 create/cloud/rediscloud/index.html | 4 ++--
 create/cloudrun/index.html | 4 ++--
 create/docker/index.html | 4 ++--
 create/docker/nodejs-nginx-redis/index.html | 4 ++--
 create/docker/redis-on-docker/images/index.html | 4 ++--
 create/docker/redis-on-docker/index.html | 4 ++--
 create/from-source/index.html | 4 ++--
 create/gcp/index.html | 4 ++--
 create/heroku/herokugo/index.html | 4 ++--
 create/heroku/herokujava/index.html | 4 ++--
 create/heroku/herokunodejs/index.html | 4 ++--
 create/heroku/herokupython/index.html | 4 ++--
 create/heroku/herokuruby/index.html | 4 ++--
 create/heroku/index.html | 4 ++--
 create/heroku/portal/index.html | 4 ++--
 create/heroku/ratelimiting-go/index.html | 4 ++--
 create/homebrew/index.html | 4 ++--
 create/images/index.html | 4 ++--
 create/index.html | 4 ++--
 create/jenkins/index.html | 4 ++--
 create/kubernetes/index.html | 4 ++--
 create/kubernetes/kubernetes-gke/index.html | 4 ++--
 create/kubernetes/kubernetes-operator/index.html | 4 ++--
 create/linux/index.html | 4 ++--
 create/netlify/deploy-docusaurus-to-netlify/index.html | 4 ++--
 create/netlify/getting-started-with-netlify/index.html | 4 ++--
 create/openshift/index.html | 4 ++--
 create/portainer/index.html | 4 ++--
 create/redis-functions/index.html | 4 ++--
 create/redis-stack/index.html | 4 ++--
 create/rediscloud/images/index.html | 4 ++--
 create/rediscloud/index.html | 4 ++--
 create/vercel/index.html | 4 ++--
 create/windows/index.html | 4 ++--
 devcember/index.html | 4 ++--
 develop/C/index.html | 4 ++--
 develop/deno/index.html | 4 ++--
 .../aspnetcore/caching/basic-api-caching/index.html | 4 ++--
 .../aspnetcore/rate-limiting/fixed-window/index.html | 4 ++--
 .../dotnet/aspnetcore/rate-limiting/middleware/index.html | 4 ++--
 .../aspnetcore/rate-limiting/sliding-window/index.html | 4 ++--
 develop/dotnet/index.html | 4 ++--
 .../redis-om-dotnet/add-and-retrieve-objects/index.html | 4 ++--
 .../aggregations/apply-functions/index.html | 4 ++--
 .../redis-om-dotnet/aggregations/groups/groups/index.html | 4 ++--
 develop/dotnet/redis-om-dotnet/aggregations/index.html | 4 ++--
 develop/dotnet/redis-om-dotnet/getting-started/index.html | 4 ++--
 .../searching/creating-an-index/index.html | 4 ++--
 .../redis-om-dotnet/searching/geo-filters/index.html | 4 ++--
 .../redis-om-dotnet/searching/numeric-queries/index.html | 4 ++--
 .../dotnet/redis-om-dotnet/simple-text-queries/index.html | 4 ++--
 develop/dotnet/streams/blocking-reads/cs-redis/index.html | 4 ++--
 develop/dotnet/streams/blocking-reads/index.html | 4 ++--
 .../streams/blocking-reads/service-stack/index.html | 4 ++--
 develop/dotnet/streams/stream-basics/index.html | 4 ++--
 develop/golang/index.html | 4 ++--
 develop/guides/netlify/getting-started/index.html | 4 ++--
 develop/index.html | 4 ++--
 develop/java/getting-started/index.html | 4 ++--
 develop/java/index.html | 4 ++--
 develop/java/redis-and-spring-course/index.html | 4 ++--
 develop/java/redis-and-spring-course/lesson_1/index.html | 4 ++--
 develop/java/redis-and-spring-course/lesson_2/index.html | 4 ++--
 develop/java/redis-and-spring-course/lesson_3/index.html | 4 ++--
 develop/java/redis-and-spring-course/lesson_4/index.html | 4 ++--
 develop/java/redis-and-spring-course/lesson_5/index.html | 4 ++--
 develop/java/redis-and-spring-course/lesson_6/index.html | 4 ++--
 develop/java/redis-and-spring-course/lesson_7/index.html | 4 ++--
 develop/java/redis-and-spring-course/lesson_8/index.html | 4 ++--
 develop/java/redis-and-spring-course/lesson_9/index.html | 4 ++--
 develop/java/spring/index.html | 4 ++--
 develop/java/spring/rate-limiting/fixed-window/index.html | 4 ++--
 .../rate-limiting/fixed-window/reactive-gears/index.html | 4 ++--
 .../rate-limiting/fixed-window/reactive-lua/index.html | 4 ++--
 .../spring/rate-limiting/fixed-window/reactive/index.html | 4 ++--
 .../java/spring/rate-limiting/getting-started/index.html | 4 ++--
 develop/java/spring/rate-limiting/index.html | 4 ++--
 .../java/spring/redis-om/redis-om-spring-hash/index.html | 4 ++--
 .../java/spring/redis-om/redis-om-spring-json/index.html | 4 ++--
 develop/java/spring/redis-om/redis-om-spring/index.html | 4 ++--
 develop/node/gettingstarted/index.html | 4 ++--
 develop/node/index.html | 4 ++--
 develop/node/node-crash-course/index.html | 4 ++--
 develop/node/nodecrashcourse/advancedstreams/index.html | 4 ++--
 develop/node/nodecrashcourse/caching/index.html | 4 ++--
 .../node/nodecrashcourse/checkinswithstreams/index.html | 4 ++--
 develop/node/nodecrashcourse/coursewrapup/index.html | 4 ++--
 .../nodecrashcourse/domainobjectswithhashes/index.html | 4 ++--
 .../nodecrashcourse/introducingredisinsight/index.html | 4 ++--
 .../node/nodecrashcourse/introductiontomodules/index.html | 4 ++--
 develop/node/nodecrashcourse/managingsuccess/index.html | 4 ++--
 develop/node/nodecrashcourse/redisandnodejs/index.html | 4 ++--
 develop/node/nodecrashcourse/redisbloom/index.html | 4 ++--
 develop/node/nodecrashcourse/redisearch/index.html | 4 ++--
 develop/node/nodecrashcourse/redisjson/index.html | 4 ++--
 .../node/nodecrashcourse/runningtheapplication/index.html | 4 ++--
 .../nodecrashcourse/sampleapplicationoverview/index.html | 4 ++--
 develop/node/nodecrashcourse/sessionstorage/index.html | 4 ++--
 develop/node/nodecrashcourse/welcome/index.html | 4 ++--
 develop/node/nodecrashcourse/whatisredis/index.html | 4 ++--
 develop/node/redis-om/index.html | 4 ++--
 develop/php/index.html | 4 ++--
 develop/python/fastapi/index.html | 4 ++--
 develop/python/index.html | 4 ++--
 develop/python/redis-om/index.html | 4 ++--
 develop/ruby/index.html | 4 ++--
 develop/rust/index.html | 4 ++--
 ebooks/8-nosql-data-modeling-patterns/index.html | 4 ++--
 ebooks/three-caching-design-patterns/index.html | 4 ++--
 explore/datadog/index.html | 4 ++--
 explore/import/index.html | 4 ++--
 explore/index.html | 4 ++--
 explore/redisdatasource/index.html | 4 ++--
 explore/redisexplorer/index.html | 4 ++--
 explore/redisinsight/autodiscover/index.html | 4 ++--
 explore/redisinsight/browser/index.html | 4 ++--
 explore/redisinsight/cluster/index.html | 4 ++--
 explore/redisinsight/getting-started/index.html | 4 ++--
 explore/redisinsight/index.html | 4 ++--
 explore/redisinsight/memoryanalyzer/index.html | 4 ++--
 explore/redisinsight/profiler/index.html | 4 ++--
 explore/redisinsight/redisearch/index.html | 4 ++--
 explore/redisinsight/redisgears/index.html | 4 ++--
 explore/redisinsight/redisgraph/index.html | 4 ++--
 explore/redisinsight/redistimeseries/index.html | 4 ++--
 explore/redisinsight/slowlog/index.html | 4 ++--
 explore/redisinsight/streams/index.html | 4 ++--
 explore/redisinsight/usinghelm/index.html | 4 ++--
 explore/redisinsightv2/browser/index.html | 4 ++--
 explore/redisinsightv2/getting-started/index.html | 4 ++--
 explore/redisinsightv2/index.html | 4 ++--
 explore/redisinsightv2/profiler/index.html | 4 ++--
 explore/redisinsightv2/redisearch/index.html | 4 ++--
 explore/redisinsightv2/windows/index.html | 4 ++--
 explore/redismod/index.html | 4 ++--
 explore/riot/index.html | 4 ++--
 explore/what-is-redis/index.html | 4 ++--
 .../how-to-use-ssl-tls-with-redis-enterprise/index.html | 4 ++--
 guides/data-modeling/index.html | 4 ++--
 guides/import-data/index.html | 4 ++--
 .../index.html | 4 ++--
 guides/import/index.html | 4 ++--
 guides/index.html | 4 ++--
 guides/indexing/index.html | 4 ++--
 guides/security/index.html | 4 ++--
 hacktoberfest/index.html | 4 ++--
 hacktoberfest/stories/lara-aasem/index.html | 4 ++--
 hacktoberfest/stories/vincent-aceto/index.html | 4 ++--
 howtos/analytics/index.html | 4 ++--
 howtos/antipatterns/index.html | 4 ++--
 howtos/caching/index.html | 4 ++--
 howtos/chatapp/index.html | 4 ++--
 howtos/frauddetection/index.html | 4 ++--
 howtos/hackernews/index.html | 4 ++--
 howtos/herokujava/index.html | 4 ++--
 howtos/herokunodejs/index.html | 4 ++--
 howtos/herokupython/index.html | 4 ++--
 howtos/index-modules/index.html | 4 ++--
 howtos/index.html | 4 ++--
 howtos/leaderboard/index.html | 4 ++--
 howtos/moviesdatabase/advancedoption/index.html | 4 ++--
 howtos/moviesdatabase/aggregation/index.html | 4 ++--
 howtos/moviesdatabase/create/index.html | 4 ++--
 howtos/moviesdatabase/getting-started/index.html | 4 ++--
 howtos/moviesdatabase/import/index.html | 4 ++--
 howtos/moviesdatabase/index.html | 4 ++--
 howtos/moviesdatabase/install/index.html | 4 ++--
 howtos/moviesdatabase/manage/index.html | 4 ++--
 howtos/moviesdatabase/query/index.html | 4 ++--
 howtos/moviesdatabase/querymovies/index.html | 4 ++--
 howtos/moviesdatabase/sampleapp/index.html | 4 ++--
 howtos/nlp/index.html | 4 ++--
 howtos/popupstore/index.html | 4 ++--
 howtos/quick-start/cheat-sheet/index.html | 8 ++++----
 howtos/quick-start/cheat-sheets/connect/index.html | 6 +++---
 howtos/quick-start/cheat-sheets/generic/index.html | 6 +++---
 howtos/quick-start/cheat-sheets/hashes/index.html | 6 +++---
 howtos/quick-start/cheat-sheets/json/index.html | 6 +++---
 howtos/quick-start/cheat-sheets/lists/index.html | 6 +++---
 .../quick-start/cheat-sheets/search-and-query/index.html | 6 +++---
 howtos/quick-start/cheat-sheets/sets/index.html | 6 +++---
 howtos/quick-start/cheat-sheets/sorted-sets/index.html | 6 +++---
 howtos/quick-start/cheat-sheets/streams/index.html | 6 +++---
 howtos/quick-start/cheat-sheets/strings/index.html | 6 +++---
 .../cheat-sheets/triggers-and-functions/index.html | 4 ++--
 howtos/quick-start/index.html | 4 ++--
 howtos/ratelimiting/index.html | 4 ++--
 .../index.html | 4 ++--
 howtos/redisbloom/images/index.html | 4 ++--
 howtos/redisbloom/index.html | 4 ++--
 .../with-dotnet/redisbloom-withdotnet/index.html | 4 ++--
 howtos/redisearch/images/index.html | 4 ++--
 howtos/redisearch/index.html | 4 ++--
 howtos/redisgears/index.html | 4 ++--
 howtos/redisgraph/csvtograph/index.html | 4 ++--
 howtos/redisgraph/explore-python-code/index.html | 4 ++--
 howtos/redisgraph/getting-started/index.html | 4 ++--
 howtos/redisgraph/index.html | 4 ++--
 howtos/redisgraph/redisgraph-cheatsheet/index.html | 4 ++--
 howtos/redisgraph/redisgraphmovies/index.html | 4 ++--
 howtos/redisgraph/using-dotnet/index.html | 4 ++--
 howtos/redisgraph/using-go/index.html | 4 ++--
 howtos/redisgraph/using-javascript/index.html | 4 ++--
 howtos/redisgraph/using-python/index.html | 4 ++--
 howtos/redisgraph/using-redisinsight/index.html | 4 ++--
 howtos/redisgraph/using-ruby/index.html | 4 ++--
 howtos/redisgraph/using-rust/index.html | 4 ++--
 howtos/redisgraphmovies/index.html | 4 ++--
 howtos/redisjson/getting-started/index.html | 4 ++--
 howtos/redisjson/index.html | 4 ++--
 howtos/redisjson/json-using-redisearch/index.html | 4 ++--
 howtos/redisjson/jsonind-document/index.html | 4 ++--
 howtos/redisjson/jsonindex-document/index.html | 4 ++--
 howtos/redisjson/redisjson-cheatsheet/index.html | 4 ++--
 howtos/redisjson/shoppingcart/index.html | 4 ++--
 howtos/redisjson/storing-complex-json-document/index.html | 4 ++--
 howtos/redisjson/storing-json-using-nodejs/index.html | 4 ++--
 howtos/redisjson/using-dotnet/index.html | 4 ++--
 howtos/redisjson/using-go/index.html | 4 ++--
 howtos/redisjson/using-java/index.html | 4 ++--
 howtos/redisjson/using-nodejs/index.html | 4 ++--
 howtos/redisjson/using-python/index.html | 4 ++--
 howtos/redisjson/using-redisinsight/index.html | 4 ++--
 howtos/redisjson/using-ruby/index.html | 4 ++--
 howtos/redistimeseries/getting-started/images/index.html | 4 ++--
 howtos/redistimeseries/getting-started/index.html | 4 ++--
 howtos/redistimeseries/index.html | 4 ++--
 howtos/redistimeseries/using-dotnet/index.html | 4 ++--
 howtos/redistimeseries/using-go/index.html | 4 ++--
 howtos/redistimeseries/using-prometheus/index.html | 4 ++--
 howtos/redistimeseries/using-python/index.html | 4 ++--
 howtos/security/index.html | 4 ++--
 howtos/shoppingcart/index.html | 4 ++--
 howtos/socialnetwork/index.html | 4 ++--
 .../caching-architecture/cache-prefetching/index.html | 4 ++--
 .../common-caching/caching-movie-app/index.html | 4 ++--
 .../common-caching/redis-gears/index.html | 4 ++--
 .../common-caching/source-code-movie-app/index.html | 4 ++--
 .../write-behind-vs-write-through/index.html | 4 ++--
 .../caching-architecture/write-behind/index.html | 4 ++--
 .../caching-architecture/write-through/index.html | 4 ++--
 .../common-fraud/source-code-tip/index.html | 4 ++--
 .../digital-identity-validation/index.html | 4 ++--
 .../fraud-detection/transaction-risk-scoring/index.html | 4 ++--
 howtos/solutions/index.html | 4 ++--
 .../microservices/api-gateway-caching/index.html | 4 ++--
 howtos/solutions/microservices/caching/index.html | 4 ++--
 .../microservices-arch-with-redis-old/index.html | 4 ++--
 .../common-data/microservices-arch-with-redis/index.html | 4 ++--
 .../common-data/microservices-arch/index.html | 4 ++--
 .../common-data/microservices-ecommerce-old/index.html | 4 ++--
 .../common-data/microservices-ecommerce/index.html | 4 ++--
 .../microservices-source-code-tip-old/index.html | 4 ++--
 .../common-data/microservices-source-code-tip/index.html | 4 ++--
 .../microservices/common-data/redis-enterprise/index.html | 4 ++--
 howtos/solutions/microservices/cqrs/index.html | 4 ++--
 .../microservices/interservice-communication/index.html | 4 ++--
 .../solutions/mobile-banking/account-dashboard/index.html | 4 ++--
 .../common-mb/additional-resources/index.html | 4 ++--
 .../mobile-banking/common-mb/data-seeding/index.html | 4 ++--
 .../mobile-banking/common-mb/source-code-tip/index.html | 4 ++--
 .../mobile-banking/session-management/index.html | 4 ++--
 .../api/decrement-many-skus/index.html | 4 ++--
 .../available-to-promise/api/decrement-sku/index.html | 4 ++--
 .../available-to-promise/api/increment-sku/index.html | 4 ++--
 .../api/retrieve-many-skus/index.html | 4 ++--
 .../available-to-promise/api/retrieve-sku/index.html | 4 ++--
 .../available-to-promise/api/update-sku/index.html | 4 ++--
 .../real-time-inventory/available-to-promise/index.html | 4 ++--
 .../common-rti/additional-resources/index.html | 4 ++--
 .../common-rti/customer-proofs/index.html | 4 ++--
 .../common-rti/rti-challenges/index.html | 4 ++--
 .../common-rti/source-code-tip/index.html | 4 ++--
 .../api/inventory-search-with-distance/index.html | 4 ++--
 .../api/inventory-search/index.html | 4 ++--
 .../real-time-inventory/local-inventory-search/index.html | 4 ++--
 howtos/solutions/vector/getting-started-vector/index.html | 4 ++--
 index.html | 4 ++--
 lp/learn-and-earn-jwt/index.html | 4 ++--
 lp/thank-you/index.html | 4 ++--
 modules/index-modules/index.html | 4 ++--
 modules/redisbloom/index.html | 4 ++--
 modules/redisearch/index.html | 4 ++--
 modules/redisgears/index.html | 4 ++--
 modules/redisgraph/index.html | 4 ++--
 modules/redisjson/index.html | 4 ++--
 modules/redistimeseries/index.html | 4 ++--
 .../argocd/index.html | 4 ++--
 .../circleci/index.html | 4 ++--
 .../index.html | 4 ++--
 .../jenkins/index.html | 4 ++--
 operate/docker/nodejs-nginx-redis/index.html | 4 ++--
 operate/index.html | 4 ++--
 operate/observability/datadog/index.html | 4 ++--
 operate/observability/index.html | 4 ++--
 operate/observability/prometheus/index.html | 4 ++--
 operate/observability/redisdatasource/index.html | 4 ++--
 operate/observability/redisexplorer/index.html | 4 ++--
 operate/orchestration/docker/images/index.html | 4 ++--
 operate/orchestration/docker/index.html | 4 ++--
 operate/orchestration/index.html | 4 ++--
 operate/orchestration/kubernetes-operator/index.html | 4 ++--
 .../orchestration/kubernetes/kubernetes-gke/index.html | 4 ++--
 .../provisioning/azure-cache-terraform-private/index.html | 4 ++--
 operate/provisioning/azure-cache-terraform/index.html | 4 ++--
 operate/provisioning/index.html | 4 ++--
 operate/provisioning/terraform/index.html | 4 ++--
 operate/redis-at-scale/course-wrap-up/index.html | 4 ++--
 .../high-availability/basic-replication/index.html | 4 ++--
 .../high-availability/exercise-1/index.html | 4 ++--
 .../high-availability/exercise-2/index.html | 4 ++--
 operate/redis-at-scale/high-availability/index.html | 4 ++--
 .../high-availability/introduction/index.html | 4 ++--
 .../high-availability/understanding-sentinels/index.html | 4 ++--
 operate/redis-at-scale/index.html | 4 ++--
 .../observability/data-points-in-redis/index.html | 4 ++--
 .../redis-at-scale/observability/exercise-1/index.html | 4 ++--
 .../observability/identifying-issues/index.html | 4 ++--
 operate/redis-at-scale/observability/index.html | 4 ++--
 .../redis-at-scale/observability/introduction/index.html | 4 ++--
 .../persistence-and-durability/exercise/index.html | 4 ++--
 .../redis-at-scale/persistence-and-durability/index.html | 4 ++--
 .../persistence-and-durability/introduction/index.html | 4 ++--
 .../persistence-options-in-redis/index.html | 4 ++--
 operate/redis-at-scale/scalability/exercise-1/index.html | 4 ++--
 operate/redis-at-scale/scalability/index.html | 4 ++--
 .../scalability/lustering-in-redis/index.html | 4 ++--
 .../scalability/redis-cli-with-redis-cluster/index.html | 4 ++--
 .../redis-cluster-and-client-libraries/index.html | 4 ++--
 .../client-performance-improvements/index.html | 4 ++--
 .../talking-to-redis/command-line-tool/index.html | 4 ++--
 .../configuring-a-redis-server/index.html | 4 ++--
 operate/redis-at-scale/talking-to-redis/index.html | 4 ++--
 .../talking-to-redis/initial-tuning/index.html | 4 ++--
 .../talking-to-redis/redis-clients/index.html | 4 ++--
 .../talking-to-redis/redis-server-overview/index.html | 4 ++--
 redis-insiders/index.html | 4 ++--
 redis-insiders/jyotsna-gupta/index.html | 4 ++--
 redis-insiders/michael-owolabi/index.html | 4 ++--
 redis-insiders/moiz-kapasi/index.html | 4 ++--
 redis-insiders/stevan-thomas/index.html | 4 ++--
 redis-live/index.html | 4 ++--
 tags/community/index.html | 4 ++--
 tags/index.html | 4 ++--
 tools/index-tools/index.html | 4 ++--
 tools/riot/index.html | 4 ++--
 tutorials/redisearch/getting-started/index.html | 4 ++--
 393 files changed, 764 insertions(+), 764 deletions(-)
 create mode 100644 assets/js/085ffbd7.4f5184c5.js
 delete mode 100644 assets/js/085ffbd7.e238594b.js
 delete mode 100644 assets/js/21bf2b11.774977b9.js
 create mode 100644 assets/js/21bf2b11.7a3f0c91.js
 delete mode 100644 assets/js/46c09b1c.197dc2c9.js
 create mode 100644 assets/js/46c09b1c.333d8b8c.js
 delete mode 100644 assets/js/51ede774.13d9575d.js
 create mode 100644 assets/js/51ede774.c006e06c.js
 delete mode 100644 assets/js/7a460b64.01afc633.js
 create mode 100644 assets/js/7a460b64.d6831948.js
 delete mode 100644 assets/js/872db8be.8ce2cc70.js
 create mode 100644 assets/js/872db8be.cbbddffb.js
 delete mode 100644 assets/js/94f8b490.ab3ddf7d.js
 create mode 100644 assets/js/94f8b490.f1f2e0a7.js
 delete mode 100644 assets/js/9f54a902.519e03cf.js
 create mode 100644 assets/js/9f54a902.795207ca.js
 create mode 100644 assets/js/c551111a.06a383c6.js
 delete mode 100644 assets/js/c551111a.610c35d1.js
 create mode 100644 assets/js/ddd418a4.2dadf504.js
 delete mode 100644 assets/js/ddd418a4.7dd57155.js
 create mode 100644 assets/js/f23cc535.516e7aad.js
 delete mode 100644 assets/js/f23cc535.6e1ab86e.js
 rename assets/js/{runtime~main.658be35f.js => runtime~main.00b4f70d.js} (97%)

diff --git a/404.html b/404.html
index 502963c885..9d51aa13c0 100644
--- a/404.html
+++ b/404.html
@@ -4,7 +4,7 @@
 The Home of Redis Developers
-
+
@@ -12,7 +12,7 @@
 Skip to main content
- + \ No newline at end of file diff --git a/assets/js/085ffbd7.4f5184c5.js b/assets/js/085ffbd7.4f5184c5.js new file mode 100644 index 0000000000..b281a17e93 --- /dev/null +++ b/assets/js/085ffbd7.4f5184c5.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunkredis_developer_hub=self.webpackChunkredis_developer_hub||[]).push([[370],{3905:(e,t,n)=>{n.d(t,{Zo:()=>d,kt:()=>p});var l=n(67294);function s(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function r(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);t&&(l=l.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,l)}return n}function a(e){for(var t=1;t=0||(s[n]=e[n]);return s}(e,t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(l=0;l=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(s[n]=e[n])}return s}var i=l.createContext({}),u=function(e){var t=l.useContext(i),n=t;return e&&(n="function"==typeof e?e(t):a(a({},t),e)),n},d=function(e){var t=u(e.components);return l.createElement(i.Provider,{value:t},e.children)},c={inlineCode:"code",wrapper:function(e){var t=e.children;return l.createElement(l.Fragment,{},t)}},m=l.forwardRef((function(e,t){var n=e.components,s=e.mdxType,r=e.originalType,i=e.parentName,d=o(e,["components","mdxType","originalType","parentName"]),m=u(n),p=s,k=m["".concat(i,".").concat(p)]||m[p]||c[p]||r;return n?l.createElement(k,a(a({ref:t},d),{},{components:n})):l.createElement(k,a({ref:t},d))}));function p(e,t){var n=arguments,s=t&&t.mdxType;if("string"==typeof e||s){var r=n.length,a=new Array(r);a[0]=m;var o={};for(var i in t)hasOwnProperty.call(t,i)&&(o[i]=t[i]);o.originalType=e,o.mdxType="string"==typeof e?e:s,a[1]=o;for(var u=2;u{n.d(t,{Z:()=>a});var l=n(67294),s=n(86010);const r="tabItem_Ymn6";function a(e){let{children:t,hidden:n,className:a}=e;return l.createElement("div",{role:"tabpanel",className:(0,s.Z)(r,a),hidden:n},t)}},65488:(e,t,n)=>{n.d(t,{Z:()=>p});var l=n(87462),s=n(67294),r=n(86010),a=n(72389),o=n(67392),i=n(7094),u=n(12466);const d="tabList__CuJ",c="tabItem_LNqP";function m(e){var t;const{lazy:n,block:a,defaultValue:m,values:p,groupId:k,className:h}=e,y=s.Children.map(e.children,(e=>{if((0,s.isValidElement)(e)&&"value"in e.props)return e;throw new Error(`Docusaurus error: Bad child <${"string"==typeof e.type?e.type:e.type.name}>: all children of the component should be , and every should have a unique "value" prop.`)})),f=p??y.map((e=>{let{props:{value:t,label:n,attributes:l}}=e;return{value:t,label:n,attributes:l}})),v=(0,o.l)(f,((e,t)=>e.value===t.value));if(v.length>0)throw new Error(`Docusaurus error: Duplicate values "${v.map((e=>e.value)).join(", ")}" found in . Every value needs to be unique.`);const b=null===m?m:m??(null==(t=y.find((e=>e.props.default)))?void 0:t.props.value)??y[0].props.value;if(null!==b&&!f.some((e=>e.value===b)))throw new Error(`Docusaurus error: The has a defaultValue "${b}" but none of its children has the corresponding value. Available values are: ${f.map((e=>e.value)).join(", ")}. 
If you intend to show no default tab, use defaultValue={null} instead.`);const{tabGroupChoices:g,setTabGroupChoices:P}=(0,i.U)(),[L,O]=(0,s.useState)(b),N=[],{blockElementScrollPositionUntilNextRender:R}=(0,u.o5)();if(null!=k){const e=g[k];null!=e&&e!==L&&f.some((t=>t.value===e))&&O(e)}const w=e=>{const t=e.currentTarget,n=N.indexOf(t),l=f[n].value;l!==L&&(R(t),O(l),null!=k&&P(k,String(l)))},T=e=>{var t;let n=null;switch(e.key){case"Enter":w(e);break;case"ArrowRight":{const t=N.indexOf(e.currentTarget)+1;n=N[t]??N[0];break}case"ArrowLeft":{const t=N.indexOf(e.currentTarget)-1;n=N[t]??N[N.length-1];break}}null==(t=n)||t.focus()};return s.createElement("div",{className:(0,r.Z)("tabs-container",d)},s.createElement("ul",{role:"tablist","aria-orientation":"horizontal",className:(0,r.Z)("tabs",{"tabs--block":a},h)},f.map((e=>{let{value:t,label:n,attributes:a}=e;return s.createElement("li",(0,l.Z)({role:"tab",tabIndex:L===t?0:-1,"aria-selected":L===t,key:t,ref:e=>N.push(e),onKeyDown:T,onClick:w},a,{className:(0,r.Z)("tabs__item",c,null==a?void 0:a.className,{"tabs__item--active":L===t})}),n??t)}))),n?(0,s.cloneElement)(y.filter((e=>e.props.value===L))[0],{className:"margin-top--md"}):s.createElement("div",{className:"margin-top--md"},y.map(((e,t)=>(0,s.cloneElement)(e,{key:t,hidden:e.props.value!==L})))))}function p(e){const t=(0,a.Z)();return s.createElement(m,(0,l.Z)({key:String(t)},e))}},36622:(e,t,n)=>{n.r(t),n.d(t,{assets:()=>d,contentTitle:()=>i,default:()=>p,frontMatter:()=>o,metadata:()=>u,toc:()=>c});var l=n(87462),s=(n(67294),n(3905)),r=n(65488),a=n(85162);const o={},i=void 0,u={unversionedId:"howtos/quick-start/cheat-sheets/lists",id:"howtos/quick-start/cheat-sheets/lists",title:"lists",description:"{l.d(t,{Zo:()=>c,kt:()=>m});var n=l(67294);function r(e,t,l){return t in e?Object.defineProperty(e,t,{value:l,enumerable:!0,configurable:!0,writable:!0}):e[t]=l,e}function s(e,t){var l=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),l.push.apply(l,n)}return l}function o(e){for(var t=1;t=0||(r[l]=e[l]);return r}(e,t);if(Object.getOwnPropertySymbols){var s=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,l)&&(r[l]=e[l])}return r}var u=n.createContext({}),i=function(e){var t=n.useContext(u),l=t;return e&&(l="function"==typeof e?e(t):o(o({},t),e)),l},c=function(e){var t=i(e.components);return n.createElement(u.Provider,{value:t},e.children)},d={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},p=n.forwardRef((function(e,t){var l=e.components,r=e.mdxType,s=e.originalType,u=e.parentName,c=a(e,["components","mdxType","originalType","parentName"]),p=i(l),m=r,k=p["".concat(u,".").concat(m)]||p[m]||d[m]||s;return l?n.createElement(k,o(o({ref:t},c),{},{components:l})):n.createElement(k,o({ref:t},c))}));function m(e,t){var l=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var s=l.length,o=new Array(s);o[0]=p;var a={};for(var u in t)hasOwnProperty.call(t,u)&&(a[u]=t[u]);a.originalType=e,a.mdxType="string"==typeof e?e:r,o[1]=a;for(var i=2;i{l.d(t,{Z:()=>o});var n=l(67294),r=l(86010);const s="tabItem_Ymn6";function o(e){let{children:t,hidden:l,className:o}=e;return n.createElement("div",{role:"tabpanel",className:(0,r.Z)(s,o),hidden:l},t)}},65488:(e,t,l)=>{l.d(t,{Z:()=>m});var n=l(87462),r=l(67294),s=l(86010),o=l(72389),a=l(67392),u=l(7094),i=l(12466);const 
c="tabList__CuJ",d="tabItem_LNqP";function p(e){var t;const{lazy:l,block:o,defaultValue:p,values:m,groupId:k,className:h}=e,y=r.Children.map(e.children,(e=>{if((0,r.isValidElement)(e)&&"value"in e.props)return e;throw new Error(`Docusaurus error: Bad child <${"string"==typeof e.type?e.type:e.type.name}>: all children of the component should be , and every should have a unique "value" prop.`)})),f=m??y.map((e=>{let{props:{value:t,label:l,attributes:n}}=e;return{value:t,label:l,attributes:n}})),v=(0,a.l)(f,((e,t)=>e.value===t.value));if(v.length>0)throw new Error(`Docusaurus error: Duplicate values "${v.map((e=>e.value)).join(", ")}" found in . Every value needs to be unique.`);const b=null===p?p:p??(null==(t=y.find((e=>e.props.default)))?void 0:t.props.value)??y[0].props.value;if(null!==b&&!f.some((e=>e.value===b)))throw new Error(`Docusaurus error: The has a defaultValue "${b}" but none of its children has the corresponding value. Available values are: ${f.map((e=>e.value)).join(", ")}. If you intend to show no default tab, use defaultValue={null} instead.`);const{tabGroupChoices:g,setTabGroupChoices:O}=(0,u.U)(),[P,L]=(0,r.useState)(b),w=[],{blockElementScrollPositionUntilNextRender:T}=(0,i.o5)();if(null!=k){const e=g[k];null!=e&&e!==P&&f.some((t=>t.value===e))&&L(e)}const E=e=>{const t=e.currentTarget,l=w.indexOf(t),n=f[l].value;n!==P&&(T(t),L(n),null!=k&&O(k,String(n)))},N=e=>{var t;let l=null;switch(e.key){case"Enter":E(e);break;case"ArrowRight":{const t=w.indexOf(e.currentTarget)+1;l=w[t]??w[0];break}case"ArrowLeft":{const t=w.indexOf(e.currentTarget)-1;l=w[t]??w[w.length-1];break}}null==(t=l)||t.focus()};return r.createElement("div",{className:(0,s.Z)("tabs-container",c)},r.createElement("ul",{role:"tablist","aria-orientation":"horizontal",className:(0,s.Z)("tabs",{"tabs--block":o},h)},f.map((e=>{let{value:t,label:l,attributes:o}=e;return r.createElement("li",(0,n.Z)({role:"tab",tabIndex:P===t?0:-1,"aria-selected":P===t,key:t,ref:e=>w.push(e),onKeyDown:N,onClick:E},o,{className:(0,s.Z)("tabs__item",d,null==o?void 0:o.className,{"tabs__item--active":P===t})}),l??t)}))),l?(0,r.cloneElement)(y.filter((e=>e.props.value===P))[0],{className:"margin-top--md"}):r.createElement("div",{className:"margin-top--md"},y.map(((e,t)=>(0,r.cloneElement)(e,{key:t,hidden:e.props.value!==P})))))}function m(e){const t=(0,o.Z)();return r.createElement(p,(0,n.Z)({key:String(t)},e))}},36622:(e,t,l)=>{l.r(t),l.d(t,{assets:()=>c,contentTitle:()=>u,default:()=>m,frontMatter:()=>a,metadata:()=>i,toc:()=>d});var n=l(87462),r=(l(67294),l(3905)),s=l(65488),o=l(85162);const a={},u=void 0,i={unversionedId:"howtos/quick-start/cheat-sheets/lists",id:"howtos/quick-start/cheat-sheets/lists",title:"lists",description:"{n.d(t,{Zo:()=>c,kt:()=>m});var l=n(67294);function r(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function o(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);t&&(l=l.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,l)}return n}function a(e){for(var t=1;t=0||(r[n]=e[n]);return r}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(l=0;l=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(r[n]=e[n])}return r}var u=l.createContext({}),i=function(e){var t=l.useContext(u),n=t;return e&&(n="function"==typeof e?e(t):a(a({},t),e)),n},c=function(e){var t=i(e.components);return 
l.createElement(u.Provider,{value:t},e.children)},d={inlineCode:"code",wrapper:function(e){var t=e.children;return l.createElement(l.Fragment,{},t)}},p=l.forwardRef((function(e,t){var n=e.components,r=e.mdxType,o=e.originalType,u=e.parentName,c=s(e,["components","mdxType","originalType","parentName"]),p=i(n),m=r,k=p["".concat(u,".").concat(m)]||p[m]||d[m]||o;return n?l.createElement(k,a(a({ref:t},c),{},{components:n})):l.createElement(k,a({ref:t},c))}));function m(e,t){var n=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var o=n.length,a=new Array(o);a[0]=p;var s={};for(var u in t)hasOwnProperty.call(t,u)&&(s[u]=t[u]);s.originalType=e,s.mdxType="string"==typeof e?e:r,a[1]=s;for(var i=2;i{n.d(t,{Z:()=>a});var l=n(67294),r=n(86010);const o="tabItem_Ymn6";function a(e){let{children:t,hidden:n,className:a}=e;return l.createElement("div",{role:"tabpanel",className:(0,r.Z)(o,a),hidden:n},t)}},65488:(e,t,n)=>{n.d(t,{Z:()=>m});var l=n(87462),r=n(67294),o=n(86010),a=n(72389),s=n(67392),u=n(7094),i=n(12466);const c="tabList__CuJ",d="tabItem_LNqP";function p(e){var t;const{lazy:n,block:a,defaultValue:p,values:m,groupId:k,className:y}=e,h=r.Children.map(e.children,(e=>{if((0,r.isValidElement)(e)&&"value"in e.props)return e;throw new Error(`Docusaurus error: Bad child <${"string"==typeof e.type?e.type:e.type.name}>: all children of the component should be , and every should have a unique "value" prop.`)})),f=m??h.map((e=>{let{props:{value:t,label:n,attributes:l}}=e;return{value:t,label:n,attributes:l}})),v=(0,s.l)(f,((e,t)=>e.value===t.value));if(v.length>0)throw new Error(`Docusaurus error: Duplicate values "${v.map((e=>e.value)).join(", ")}" found in . Every value needs to be unique.`);const b=null===p?p:p??(null==(t=h.find((e=>e.props.default)))?void 0:t.props.value)??h[0].props.value;if(null!==b&&!f.some((e=>e.value===b)))throw new Error(`Docusaurus error: The has a defaultValue "${b}" but none of its children has the corresponding value. Available values are: ${f.map((e=>e.value)).join(", ")}. 
If you intend to show no default tab, use defaultValue={null} instead.`);const{tabGroupChoices:g,setTabGroupChoices:T}=(0,u.U)(),[E,O]=(0,r.useState)(b),w=[],{blockElementScrollPositionUntilNextRender:x}=(0,i.o5)();if(null!=k){const e=g[k];null!=e&&e!==E&&f.some((t=>t.value===e))&&O(e)}const C=e=>{const t=e.currentTarget,n=w.indexOf(t),l=f[n].value;l!==E&&(x(t),O(l),null!=k&&T(k,String(l)))},I=e=>{var t;let n=null;switch(e.key){case"Enter":C(e);break;case"ArrowRight":{const t=w.indexOf(e.currentTarget)+1;n=w[t]??w[0];break}case"ArrowLeft":{const t=w.indexOf(e.currentTarget)-1;n=w[t]??w[w.length-1];break}}null==(t=n)||t.focus()};return r.createElement("div",{className:(0,o.Z)("tabs-container",c)},r.createElement("ul",{role:"tablist","aria-orientation":"horizontal",className:(0,o.Z)("tabs",{"tabs--block":a},y)},f.map((e=>{let{value:t,label:n,attributes:a}=e;return r.createElement("li",(0,l.Z)({role:"tab",tabIndex:E===t?0:-1,"aria-selected":E===t,key:t,ref:e=>w.push(e),onKeyDown:I,onClick:C},a,{className:(0,o.Z)("tabs__item",d,null==a?void 0:a.className,{"tabs__item--active":E===t})}),n??t)}))),n?(0,r.cloneElement)(h.filter((e=>e.props.value===E))[0],{className:"margin-top--md"}):r.createElement("div",{className:"margin-top--md"},h.map(((e,t)=>(0,r.cloneElement)(e,{key:t,hidden:e.props.value!==E})))))}function m(e){const t=(0,a.Z)();return r.createElement(p,(0,l.Z)({key:String(t)},e))}},32936:(e,t,n)=>{n.r(t),n.d(t,{assets:()=>c,contentTitle:()=>u,default:()=>m,frontMatter:()=>s,metadata:()=>i,toc:()=>d});var l=n(87462),r=(n(67294),n(3905)),o=n(65488),a=n(85162);const s={},u=void 0,i={unversionedId:"howtos/quick-start/cheat-sheets/strings",id:"howtos/quick-start/cheat-sheets/strings",title:"strings",description:"{n.d(t,{Zo:()=>y,kt:()=>k});var l=n(67294);function r(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function a(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);t&&(l=l.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,l)}return n}function o(e){for(var t=1;t=0||(r[n]=e[n]);return r}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(l=0;l=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(r[n]=e[n])}return r}var i=l.createContext({}),u=function(e){var t=l.useContext(i),n=t;return e&&(n="function"==typeof e?e(t):o(o({},t),e)),n},y=function(e){var t=u(e.components);return l.createElement(i.Provider,{value:t},e.children)},d={inlineCode:"code",wrapper:function(e){var t=e.children;return l.createElement(l.Fragment,{},t)}},c=l.forwardRef((function(e,t){var n=e.components,r=e.mdxType,a=e.originalType,i=e.parentName,y=s(e,["components","mdxType","originalType","parentName"]),c=u(n),k=r,m=c["".concat(i,".").concat(k)]||c[k]||d[k]||a;return n?l.createElement(m,o(o({ref:t},y),{},{components:n})):l.createElement(m,o({ref:t},y))}));function k(e,t){var n=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var a=n.length,o=new Array(a);o[0]=c;var s={};for(var i in t)hasOwnProperty.call(t,i)&&(s[i]=t[i]);s.originalType=e,s.mdxType="string"==typeof e?e:r,o[1]=s;for(var u=2;u{n.d(t,{Z:()=>o});var l=n(67294),r=n(86010);const a="tabItem_Ymn6";function o(e){let{children:t,hidden:n,className:o}=e;return l.createElement("div",{role:"tabpanel",className:(0,r.Z)(a,o),hidden:n},t)}},65488:(e,t,n)=>{n.d(t,{Z:()=>k});var l=n(87462),r=n(67294),a=n(86010),o=n(72389),s=n(67392),i=n(7094),u=n(12466);const 
y="tabList__CuJ",d="tabItem_LNqP";function c(e){var t;const{lazy:n,block:o,defaultValue:c,values:k,groupId:m,className:p}=e,h=r.Children.map(e.children,(e=>{if((0,r.isValidElement)(e)&&"value"in e.props)return e;throw new Error(`Docusaurus error: Bad child <${"string"==typeof e.type?e.type:e.type.name}>: all children of the component should be , and every should have a unique "value" prop.`)})),v=k??h.map((e=>{let{props:{value:t,label:n,attributes:l}}=e;return{value:t,label:n,attributes:l}})),f=(0,s.l)(v,((e,t)=>e.value===t.value));if(f.length>0)throw new Error(`Docusaurus error: Duplicate values "${f.map((e=>e.value)).join(", ")}" found in . Every value needs to be unique.`);const b=null===c?c:c??(null==(t=h.find((e=>e.props.default)))?void 0:t.props.value)??h[0].props.value;if(null!==b&&!v.some((e=>e.value===b)))throw new Error(`Docusaurus error: The has a defaultValue "${b}" but none of its children has the corresponding value. Available values are: ${v.map((e=>e.value)).join(", ")}. If you intend to show no default tab, use defaultValue={null} instead.`);const{tabGroupChoices:g,setTabGroupChoices:T}=(0,i.U)(),[O,E]=(0,r.useState)(b),x=[],{blockElementScrollPositionUntilNextRender:I}=(0,u.o5)();if(null!=m){const e=g[m];null!=e&&e!==O&&v.some((t=>t.value===e))&&E(e)}const w=e=>{const t=e.currentTarget,n=x.indexOf(t),l=v[n].value;l!==O&&(I(t),E(l),null!=m&&T(m,String(l)))},N=e=>{var t;let n=null;switch(e.key){case"Enter":w(e);break;case"ArrowRight":{const t=x.indexOf(e.currentTarget)+1;n=x[t]??x[0];break}case"ArrowLeft":{const t=x.indexOf(e.currentTarget)-1;n=x[t]??x[x.length-1];break}}null==(t=n)||t.focus()};return r.createElement("div",{className:(0,a.Z)("tabs-container",y)},r.createElement("ul",{role:"tablist","aria-orientation":"horizontal",className:(0,a.Z)("tabs",{"tabs--block":o},p)},v.map((e=>{let{value:t,label:n,attributes:o}=e;return r.createElement("li",(0,l.Z)({role:"tab",tabIndex:O===t?0:-1,"aria-selected":O===t,key:t,ref:e=>x.push(e),onKeyDown:N,onClick:w},o,{className:(0,a.Z)("tabs__item",d,null==o?void 0:o.className,{"tabs__item--active":O===t})}),n??t)}))),n?(0,r.cloneElement)(h.filter((e=>e.props.value===O))[0],{className:"margin-top--md"}):r.createElement("div",{className:"margin-top--md"},h.map(((e,t)=>(0,r.cloneElement)(e,{key:t,hidden:e.props.value!==O})))))}function k(e){const t=(0,o.Z)();return r.createElement(c,(0,l.Z)({key:String(t)},e))}},32936:(e,t,n)=>{n.r(t),n.d(t,{assets:()=>y,contentTitle:()=>i,default:()=>k,frontMatter:()=>s,metadata:()=>u,toc:()=>d});var l=n(87462),r=(n(67294),n(3905)),a=n(65488),o=n(85162);const s={},i=void 0,u={unversionedId:"howtos/quick-start/cheat-sheets/strings",id:"howtos/quick-start/cheat-sheets/strings",title:"strings",description:"{t.d(n,{Zo:()=>c,kt:()=>f});var r=t(67294);function a(e,n,t){return n in e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function l(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);n&&(r=r.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,r)}return t}function s(e){for(var n=1;n=0||(a[t]=e[t]);return a}(e,n);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(a[t]=e[t])}return a}var i=r.createContext({}),u=function(e){var n=r.useContext(i),t=n;return e&&(t="function"==typeof e?e(n):s(s({},n),e)),t},c=function(e){var n=u(e.components);return 
r.createElement(i.Provider,{value:n},e.children)},d={inlineCode:"code",wrapper:function(e){var n=e.children;return r.createElement(r.Fragment,{},n)}},p=r.forwardRef((function(e,n){var t=e.components,a=e.mdxType,l=e.originalType,i=e.parentName,c=o(e,["components","mdxType","originalType","parentName"]),p=u(t),f=a,m=p["".concat(i,".").concat(f)]||p[f]||d[f]||l;return t?r.createElement(m,s(s({ref:n},c),{},{components:t})):r.createElement(m,s({ref:n},c))}));function f(e,n){var t=arguments,a=n&&n.mdxType;if("string"==typeof e||a){var l=t.length,s=new Array(l);s[0]=p;var o={};for(var i in n)hasOwnProperty.call(n,i)&&(o[i]=n[i]);o.originalType=e,o.mdxType="string"==typeof e?e:a,s[1]=o;for(var u=2;u{t.d(n,{Z:()=>s});var r=t(67294),a=t(86010);const l="tabItem_Ymn6";function s(e){let{children:n,hidden:t,className:s}=e;return r.createElement("div",{role:"tabpanel",className:(0,a.Z)(l,s),hidden:t},n)}},65488:(e,n,t)=>{t.d(n,{Z:()=>f});var r=t(87462),a=t(67294),l=t(86010),s=t(72389),o=t(67392),i=t(7094),u=t(12466);const c="tabList__CuJ",d="tabItem_LNqP";function p(e){var n;const{lazy:t,block:s,defaultValue:p,values:f,groupId:m,className:g}=e,h=a.Children.map(e.children,(e=>{if((0,a.isValidElement)(e)&&"value"in e.props)return e;throw new Error(`Docusaurus error: Bad child <${"string"==typeof e.type?e.type:e.type.name}>: all children of the component should be , and every should have a unique "value" prop.`)})),k=f??h.map((e=>{let{props:{value:n,label:t,attributes:r}}=e;return{value:n,label:t,attributes:r}})),T=(0,o.l)(k,((e,n)=>e.value===n.value));if(T.length>0)throw new Error(`Docusaurus error: Duplicate values "${T.map((e=>e.value)).join(", ")}" found in . Every value needs to be unique.`);const E=null===p?p:p??(null==(n=h.find((e=>e.props.default)))?void 0:n.props.value)??h[0].props.value;if(null!==E&&!k.some((e=>e.value===E)))throw new Error(`Docusaurus error: The has a defaultValue "${E}" but none of its children has the corresponding value. Available values are: ${k.map((e=>e.value)).join(", ")}. 
If you intend to show no default tab, use defaultValue={null} instead.`);const{tabGroupChoices:y,setTabGroupChoices:S}=(0,i.U)(),[O,A]=(0,a.useState)(E),b=[],{blockElementScrollPositionUntilNextRender:R}=(0,u.o5)();if(null!=m){const e=y[m];null!=e&&e!==O&&k.some((n=>n.value===e))&&A(e)}const v=e=>{const n=e.currentTarget,t=b.indexOf(n),r=k[t].value;r!==O&&(R(n),A(r),null!=m&&S(m,String(r)))},N=e=>{var n;let t=null;switch(e.key){case"Enter":v(e);break;case"ArrowRight":{const n=b.indexOf(e.currentTarget)+1;t=b[n]??b[0];break}case"ArrowLeft":{const n=b.indexOf(e.currentTarget)-1;t=b[n]??b[b.length-1];break}}null==(n=t)||n.focus()};return a.createElement("div",{className:(0,l.Z)("tabs-container",c)},a.createElement("ul",{role:"tablist","aria-orientation":"horizontal",className:(0,l.Z)("tabs",{"tabs--block":s},g)},k.map((e=>{let{value:n,label:t,attributes:s}=e;return a.createElement("li",(0,r.Z)({role:"tab",tabIndex:O===n?0:-1,"aria-selected":O===n,key:n,ref:e=>b.push(e),onKeyDown:N,onClick:v},s,{className:(0,l.Z)("tabs__item",d,null==s?void 0:s.className,{"tabs__item--active":O===n})}),t??n)}))),t?(0,a.cloneElement)(h.filter((e=>e.props.value===O))[0],{className:"margin-top--md"}):a.createElement("div",{className:"margin-top--md"},h.map(((e,n)=>(0,a.cloneElement)(e,{key:n,hidden:e.props.value!==O})))))}function f(e){const n=(0,s.Z)();return a.createElement(p,(0,r.Z)({key:String(n)},e))}},74476:(e,n,t)=>{t.r(n),t.d(n,{assets:()=>c,contentTitle:()=>i,default:()=>f,frontMatter:()=>o,metadata:()=>u,toc:()=>d});var r=t(87462),a=(t(67294),t(3905)),l=t(65488),s=t(85162);const o={},i=void 0,u={unversionedId:"howtos/quick-start/cheat-sheets/search-and-query",id:"howtos/quick-start/cheat-sheets/search-and-query",title:"search-and-query",description:" 18\nconst query4 = '(@skills:{NodeJS})';\nconst searchResult = await client.ft.search(\n STAFF_INDEX_KEY,\n query1, //query2, query3, query4\n {\n RETURN: ['name', 'age', 'isSingle'],\n LIMIT: {\n from: 0,\n size: 10,\n },\n },\n);\nconsole.log(JSON.stringify(searchResult));\n//{\"total\":1,\"documents\":[{\"id\":\"staff:2\",\"value\":{\"name\":\"Alex\",\"age\":\"45\",\"isSingle\":\"1\"}}]}\n\n/*\n FT.AGGREGATE index query\n Run a search query on an index, and perform aggregate transformations on the results\n\n FT.AGGREGATE staff:index \"(@age:[(10 +inf])\" \n GROUPBY 1 @age \n REDUCE COUNT 0 AS userCount\n SORTBY 1 @age\n LIMIT 0 10\n */\nconst aggregateResult = await client.ft.aggregate(\n STAFF_INDEX_KEY,\n '(@age:[(10 +inf])',\n {\n STEPS: [\n {\n type: AggregateSteps.GROUPBY,\n properties: ['@age'],\n REDUCE: [\n {\n type: AggregateGroupByReducers.COUNT,\n AS: 'userCount',\n },\n ],\n },\n {\n type: AggregateSteps.SORTBY,\n BY: '@age',\n },\n {\n type: AggregateSteps.LIMIT,\n from: 0,\n size: 10,\n },\n ],\n },\n);\nconsole.log(JSON.stringify(aggregateResult));\n//{\"total\":2,\"results\":[{\"age\":\"22\",\"userCount\":\"1\"},{\"age\":\"45\",\"userCount\":\"1\"}]}\n//----\n\n/*\n FT.INFO index\n Return information and statistics on the index\n O(1)\n */\nconst infoResult = await client.ft.info(STAFF_INDEX_KEY);\nconsole.log(infoResult);\n/**\n {\n indexName: 'staff:index',\n numDocs: '2',\n maxDocId: '4',\n stopWords: 2\n ...\n }\n */\n")))))}f.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/46c09b1c.333d8b8c.js b/assets/js/46c09b1c.333d8b8c.js new file mode 100644 index 0000000000..299bc2a5d2 --- /dev/null +++ b/assets/js/46c09b1c.333d8b8c.js @@ -0,0 +1 @@ +"use 
strict";(self.webpackChunkredis_developer_hub=self.webpackChunkredis_developer_hub||[]).push([[5979],{3905:(e,n,t)=>{t.d(n,{Zo:()=>u,kt:()=>m});var a=t(67294);function r(e,n,t){return n in e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function s(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);n&&(a=a.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,a)}return t}function l(e){for(var n=1;n=0||(r[t]=e[t]);return r}(e,n);if(Object.getOwnPropertySymbols){var s=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(r[t]=e[t])}return r}var o=a.createContext({}),d=function(e){var n=a.useContext(o),t=n;return e&&(t="function"==typeof e?e(n):l(l({},n),e)),t},u=function(e){var n=d(e.components);return a.createElement(o.Provider,{value:n},e.children)},c={inlineCode:"code",wrapper:function(e){var n=e.children;return a.createElement(a.Fragment,{},n)}},p=a.forwardRef((function(e,n){var t=e.components,r=e.mdxType,s=e.originalType,o=e.parentName,u=i(e,["components","mdxType","originalType","parentName"]),p=d(t),m=r,f=p["".concat(o,".").concat(m)]||p[m]||c[m]||s;return t?a.createElement(f,l(l({ref:n},u),{},{components:t})):a.createElement(f,l({ref:n},u))}));function m(e,n){var t=arguments,r=n&&n.mdxType;if("string"==typeof e||r){var s=t.length,l=new Array(s);l[0]=p;var i={};for(var o in n)hasOwnProperty.call(n,o)&&(i[o]=n[o]);i.originalType=e,i.mdxType="string"==typeof e?e:r,l[1]=i;for(var d=2;d{t.d(n,{Z:()=>l});var a=t(67294),r=t(86010);const s="tabItem_Ymn6";function l(e){let{children:n,hidden:t,className:l}=e;return a.createElement("div",{role:"tabpanel",className:(0,r.Z)(s,l),hidden:t},n)}},65488:(e,n,t)=>{t.d(n,{Z:()=>m});var a=t(87462),r=t(67294),s=t(86010),l=t(72389),i=t(67392),o=t(7094),d=t(12466);const u="tabList__CuJ",c="tabItem_LNqP";function p(e){var n;const{lazy:t,block:l,defaultValue:p,values:m,groupId:f,className:g}=e,y=r.Children.map(e.children,(e=>{if((0,r.isValidElement)(e)&&"value"in e.props)return e;throw new Error(`Docusaurus error: Bad child <${"string"==typeof e.type?e.type:e.type.name}>: all children of the component should be , and every should have a unique "value" prop.`)})),T=m??y.map((e=>{let{props:{value:n,label:t,attributes:a}}=e;return{value:n,label:t,attributes:a}})),h=(0,i.l)(T,((e,n)=>e.value===n.value));if(h.length>0)throw new Error(`Docusaurus error: Duplicate values "${h.map((e=>e.value)).join(", ")}" found in . Every value needs to be unique.`);const E=null===p?p:p??(null==(n=y.find((e=>e.props.default)))?void 0:n.props.value)??y[0].props.value;if(null!==E&&!T.some((e=>e.value===E)))throw new Error(`Docusaurus error: The has a defaultValue "${E}" but none of its children has the corresponding value. Available values are: ${T.map((e=>e.value)).join(", ")}. 
If you intend to show no default tab, use defaultValue={null} instead.`);const{tabGroupChoices:S,setTabGroupChoices:x}=(0,o.U)(),[k,O]=(0,r.useState)(E),A=[],{blockElementScrollPositionUntilNextRender:N}=(0,d.o5)();if(null!=f){const e=S[f];null!=e&&e!==k&&T.some((n=>n.value===e))&&O(e)}const R=e=>{const n=e.currentTarget,t=A.indexOf(n),a=T[t].value;a!==k&&(N(n),O(a),null!=f&&x(f,String(a)))},F=e=>{var n;let t=null;switch(e.key){case"Enter":R(e);break;case"ArrowRight":{const n=A.indexOf(e.currentTarget)+1;t=A[n]??A[0];break}case"ArrowLeft":{const n=A.indexOf(e.currentTarget)-1;t=A[n]??A[A.length-1];break}}null==(n=t)||n.focus()};return r.createElement("div",{className:(0,s.Z)("tabs-container",u)},r.createElement("ul",{role:"tablist","aria-orientation":"horizontal",className:(0,s.Z)("tabs",{"tabs--block":l},g)},T.map((e=>{let{value:n,label:t,attributes:l}=e;return r.createElement("li",(0,a.Z)({role:"tab",tabIndex:k===n?0:-1,"aria-selected":k===n,key:n,ref:e=>A.push(e),onKeyDown:F,onClick:R},l,{className:(0,s.Z)("tabs__item",c,null==l?void 0:l.className,{"tabs__item--active":k===n})}),t??n)}))),t?(0,r.cloneElement)(y.filter((e=>e.props.value===k))[0],{className:"margin-top--md"}):r.createElement("div",{className:"margin-top--md"},y.map(((e,n)=>(0,r.cloneElement)(e,{key:n,hidden:e.props.value!==k})))))}function m(e){const n=(0,l.Z)();return r.createElement(p,(0,a.Z)({key:String(n)},e))}},74476:(e,n,t)=>{t.r(n),t.d(n,{assets:()=>u,contentTitle:()=>o,default:()=>m,frontMatter:()=>i,metadata:()=>d,toc:()=>c});var a=t(87462),r=(t(67294),t(3905)),s=t(65488),l=t(85162);const i={},o=void 0,d={unversionedId:"howtos/quick-start/cheat-sheets/search-and-query",id:"howtos/quick-start/cheat-sheets/search-and-query",title:"search-and-query",description:" 18\nconst query4 = '(@skills:{NodeJS})';\nconst searchResult = await client.ft.search(\n STAFF_INDEX_KEY,\n query1, //query2, query3, query4\n {\n RETURN: ['name', 'age', 'isSingle'],\n LIMIT: {\n from: 0,\n size: 10,\n },\n },\n);\nconsole.log(JSON.stringify(searchResult));\n//{\"total\":1,\"documents\":[{\"id\":\"staff:2\",\"value\":{\"name\":\"Alex\",\"age\":\"45\",\"isSingle\":\"1\"}}]}\n\n/*\n FT.AGGREGATE index query\n Run a search query on an index, and perform aggregate transformations on the results\n\n FT.AGGREGATE staff:index \"(@age:[(10 +inf])\"\n GROUPBY 1 @age\n REDUCE COUNT 0 AS userCount\n SORTBY 1 @age\n LIMIT 0 10\n */\nconst aggregateResult = await client.ft.aggregate(\n STAFF_INDEX_KEY,\n '(@age:[(10 +inf])',\n {\n STEPS: [\n {\n type: AggregateSteps.GROUPBY,\n properties: ['@age'],\n REDUCE: [\n {\n type: AggregateGroupByReducers.COUNT,\n AS: 'userCount',\n },\n ],\n },\n {\n type: AggregateSteps.SORTBY,\n BY: '@age',\n },\n {\n type: AggregateSteps.LIMIT,\n from: 0,\n size: 10,\n },\n ],\n },\n);\nconsole.log(JSON.stringify(aggregateResult));\n//{\"total\":2,\"results\":[{\"age\":\"22\",\"userCount\":\"1\"},{\"age\":\"45\",\"userCount\":\"1\"}]}\n//----\n\n/*\n FT.INFO index\n Return information and statistics on the index\n O(1)\n */\nconst infoResult = await client.ft.info(STAFF_INDEX_KEY);\nconsole.log(infoResult);\n/**\n {\n indexName: 'staff:index',\n numDocs: '2',\n maxDocId: '4',\n stopWords: 2\n ...\n }\n */\n"))),(0,r.kt)(l.Z,{value:"PYTHON",mdxType:"TabItem"},(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-python"},"try:\n r.ft('idx-employees').dropindex()\nexcept:\n pass\n\n# FT.CREATE index [ON HASH | JSON] [PREFIX count prefix [prefix ...]] SCHEMA field_name [AS alias] TEXT | TAG | NUMERIC | GEO 
| VECTOR | GEOSHAP [SORTABLE [UNF]] [NOINDEX] [ field_name [AS alias] TEXT | TAG | NUMERIC | GEO | VECTOR | GEOSHAPE [ SORTABLE [UNF]] [NOINDEX] ...]\n# O(K) where K is the number of fields in the document, O(N) for keys in the keyspace\n# Creates a new search index with the given specification.\nschema = (TextField('$.name', as_name='name', sortable=True), NumericField('$.age', as_name='age', sortable=True),\n TagField('$.single', as_name='single'), TagField('$.skills[*]', as_name='skills'))\n\nr.ft('idx-employees').create_index(schema, definition=IndexDefinition(\n prefix=['employee_profile:'], index_type=IndexType.JSON))\n\n# FT.INFO index\n# O(1)\n# Return information and statistics on the index.\nr.ft('idx-employees').info()\n\n# FT.SEARCH index query\n# O(N)\n# Search the index with a textual query, returning either documents or just ids\nr.ft('idx-employees').search('Nicol')\nr.ft('idx-employees').search(\"@single:{false}\")\nr.ft('idx-employees').search(\"@skills:{python}\")\nr.ft('idx-employees').search(Query(\"*\").add_filter(NumericFilter('age', 30, 40)))\nr.json().arrappend('employee_profile:karol', '$.skills', 'python', 'java', 'c#')\nr.ft('idx-employees').search(Query(\"@skills:{java}, @skills:{python}\"))\n\n# FT.AGGREGATE index query\n# O(1)\n# Run a search query on an index, and perform aggregate transformations on the results, extracting statistics etc from them\nr.ft('idx-employees').aggregate(aggregations.AggregateRequest(\"*\").group_by('@age',\n reducers.count().alias('count')).sort_by(\"@age\")).rows\n\nr.ft('idx-employees').aggregate(aggregations.AggregateRequest(\"@skills:{python}\").group_by('@skills',\n reducers.tolist('@name').alias('names'))).rows\n"))),(0,r.kt)(l.Z,{value:"C#",mdxType:"TabItem"},(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-csharp"},'try\n{\n /*\n * FT.DROPINDEX index [DD]\n * O(1)\n * Deletes an index and all the documents in it.\n */\n db.FT().DropIndex("idx-employees");\n}\ncatch\n{\n // Index not found\n}\n\n/*\n * FT.CREATE index [ON HASH | JSON] [PREFIX count prefix [prefix ...]] SCHEMA\n * field_name [AS alias] TEXT | TAG | NUMERIC | GEO | VECTOR | GEOSHAP [SORTABLE\n * [UNF]] [NOINDEX] [ field_name [AS alias] TEXT | TAG | NUMERIC | GEO | VECTOR\n * | GEOSHAPE [ SORTABLE [UNF]] [NOINDEX] ...]\n * O(K) where K is the number of fields in the document, O(N) for keys in the\n * keyspace\n * Creates a new search index with the given specification.\n */\ndb.FT().Create("idx-employees", new FTCreateParams()\n .On(IndexDataType.JSON)\n .Prefix("employee_profile:"),\n new Schema()\n .AddTextField(new FieldName("$.name", "name"), sortable: true)\n .AddNumericField(new FieldName("$.age", "age"), sortable: true)\n .AddTagField(new FieldName("$.single", "single"))\n .AddTagField(new FieldName("$.skills[*]", "skills")));\n\n/*\n * FT.INFO index\n * O(1)\n * Returns information and statistics on the index.\n */\ndb.FT().Info("idx-employees");\n\n/*\n * FT._LIST\n * O(1)\n * Returns a list of all existing indexes.\n */\ndb.FT()._List();\n\n/*\n * FT.SEARCH index query\n * O(N)\n * Search the index with a textual query, returning either documents or just ids\n */\ndb.FT().Search("idx-employees", new Query("@name:{nicol}"));\ndb.FT().Search("idx-employees", new Query("@single:{false}"));\ndb.FT().Search("idx-employees", new Query("@skills:{python}"));\ndb.FT().Search("idx-employees", new Query().AddFilter(new NumericFilter("@age", 30, 40)));\ndb.JSON().ArrAppend("employee_profile:karol", "$.skills", "python", "java", 
"c#");\ndb.FT().Search("idx-employees", new Query("@skills:{java}, @skills:{python}"));\n\n/*\n * FT.AGGREGATE index query\n * O(1)\n * Run a search query on an index, and perform aggregate transformations on the\n * results, extracting statistics etc from them\n */\ndb.FT().Aggregate("idx-employees", new AggregationRequest("@age:[20 40]")\n .GroupBy("@age", Reducers.Count().As("count"))\n .SortBy(new SortedField("@age", SortedField.SortOrder.ASC)));\ndb.FT().Aggregate("idx-employees", new AggregationRequest("@skills:{python}")\n .GroupBy("@skills", Reducers.ToList("@name").As("names")));\n'))),(0,r.kt)(l.Z,{value:"JAVA",mdxType:"TabItem"},(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-java"},'try {\n jedis.ftDropIndex("idx-employees");\n} catch (Exception e) {\n // Index not found\n}\n\n/*\n * FT.CREATE index [ON HASH | JSON] [PREFIX count prefix [prefix ...]] SCHEMA\n * field_name [AS alias] TEXT | TAG | NUMERIC | GEO | VECTOR | GEOSHAP [SORTABLE\n * [UNF]] [NOINDEX] [ field_name [AS alias] TEXT | TAG | NUMERIC | GEO | VECTOR\n * | GEOSHAPE [ SORTABLE [UNF]] [NOINDEX] ...]\n * O(K) where K is the number of fields in the document, O(N) for keys in the\n * keyspace\n * Creates a new search index with the given specification.\n */\nSchema schema = new Schema()\n .addSortableTextField("$.name", 1.0).as("name")\n .addSortableNumericField("$.age").as("age")\n .addTagField("$.single").as("single")\n .addTagField("$.skills[*]").as("skills");\n\nIndexDefinition def = new IndexDefinition(IndexDefinition.Type.JSON)\n .setPrefixes("employee_profile:");\n\njedis.ftCreate("idx-employees", IndexOptions.defaultOptions().setDefinition(def), schema);\n\n/*\n * FT.INFO index\n * O(1)\n * Returns information and statistics on the index.\n */\njedis.ftInfo("idx-employees");\n\n/*\n * FT._LIST\n * O(1)\n * Returns a list of all existing indexes.\n */\njedis.ftList();\n\n/*\n * FT.SEARCH index query\n * O(N)\n * Search the index with a textual query, returning either documents or just ids\n */\njedis.ftSearch("idx-employees", "Nicol");\njedis.ftSearch("idx-employees", "@single:{false}");\njedis.ftSearch("idx-employees", "@skills:{python}");\njedis.ftSearch("idx-employees", "*",\n FTSearchParams.searchParams().filter(new NumericFilter("age", 30, 40)));\njedis.jsonArrAppend("employee_profile:karol", Path2.of("$.skills"), "\\"python\\"", "\\"java\\"", "\\"c#\\"");\njedis.ftSearch("idx-employees", "@skills:{java}, @skills:{python}");\n\n/*\n * FT.AGGREGATE index query\n * O(1)\n * Run a search query on an index, and perform aggregate transformations on the\n * results, extracting statistics etc from them\n */\njedis.ftAggregate("idx-employees", new AggregationBuilder()\n .groupBy("@age", Reducers.count().as("count")).sortBy(new SortedField("@age", SortOrder.ASC)))\n .getRows();\njedis.ftAggregate("idx-employees", new AggregationBuilder("@skills:{python}")\n .groupBy("@skills", Reducers.to_list("@name").as("names")))\n .getRows();\n')))))}m.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/51ede774.13d9575d.js b/assets/js/51ede774.13d9575d.js deleted file mode 100644 index 8b36bc9d79..0000000000 --- a/assets/js/51ede774.13d9575d.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunkredis_developer_hub=self.webpackChunkredis_developer_hub||[]).push([[5426],{3905:(e,t,n)=>{n.d(t,{Zo:()=>p,kt:()=>d});var l=n(67294);function a(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function r(e,t){var 
n=Object.keys(e);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);t&&(l=l.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,l)}return n}function o(e){for(var t=1;t=0||(a[n]=e[n]);return a}(e,t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(l=0;l=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(a[n]=e[n])}return a}var u=l.createContext({}),i=function(e){var t=l.useContext(u),n=t;return e&&(n="function"==typeof e?e(t):o(o({},t),e)),n},p=function(e){var t=i(e.components);return l.createElement(u.Provider,{value:t},e.children)},h={inlineCode:"code",wrapper:function(e){var t=e.children;return l.createElement(l.Fragment,{},t)}},c=l.forwardRef((function(e,t){var n=e.components,a=e.mdxType,r=e.originalType,u=e.parentName,p=s(e,["components","mdxType","originalType","parentName"]),c=i(n),d=a,m=c["".concat(u,".").concat(d)]||c[d]||h[d]||r;return n?l.createElement(m,o(o({ref:t},p),{},{components:n})):l.createElement(m,o({ref:t},p))}));function d(e,t){var n=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var r=n.length,o=new Array(r);o[0]=c;var s={};for(var u in t)hasOwnProperty.call(t,u)&&(s[u]=t[u]);s.originalType=e,s.mdxType="string"==typeof e?e:a,o[1]=s;for(var i=2;i{n.d(t,{Z:()=>o});var l=n(67294),a=n(86010);const r="tabItem_Ymn6";function o(e){let{children:t,hidden:n,className:o}=e;return l.createElement("div",{role:"tabpanel",className:(0,a.Z)(r,o),hidden:n},t)}},65488:(e,t,n)=>{n.d(t,{Z:()=>d});var l=n(87462),a=n(67294),r=n(86010),o=n(72389),s=n(67392),u=n(7094),i=n(12466);const p="tabList__CuJ",h="tabItem_LNqP";function c(e){var t;const{lazy:n,block:o,defaultValue:c,values:d,groupId:m,className:k}=e,v=a.Children.map(e.children,(e=>{if((0,a.isValidElement)(e)&&"value"in e.props)return e;throw new Error(`Docusaurus error: Bad child <${"string"==typeof e.type?e.type:e.type.name}>: all children of the component should be , and every should have a unique "value" prop.`)})),y=d??v.map((e=>{let{props:{value:t,label:n,attributes:l}}=e;return{value:t,label:n,attributes:l}})),f=(0,s.l)(y,((e,t)=>e.value===t.value));if(f.length>0)throw new Error(`Docusaurus error: Duplicate values "${f.map((e=>e.value)).join(", ")}" found in . Every value needs to be unique.`);const N=null===c?c:c??(null==(t=v.find((e=>e.props.default)))?void 0:t.props.value)??v[0].props.value;if(null!==N&&!y.some((e=>e.value===N)))throw new Error(`Docusaurus error: The has a defaultValue "${N}" but none of its children has the corresponding value. Available values are: ${y.map((e=>e.value)).join(", ")}. 
If you intend to show no default tab, use defaultValue={null} instead.`);const{tabGroupChoices:O,setTabGroupChoices:b}=(0,u.U)(),[w,g]=(0,a.useState)(N),S=[],{blockElementScrollPositionUntilNextRender:E}=(0,i.o5)();if(null!=m){const e=O[m];null!=e&&e!==w&&y.some((t=>t.value===e))&&g(e)}const R=e=>{const t=e.currentTarget,n=S.indexOf(t),l=y[n].value;l!==w&&(E(t),g(l),null!=m&&b(m,String(l)))},J=e=>{var t;let n=null;switch(e.key){case"Enter":R(e);break;case"ArrowRight":{const t=S.indexOf(e.currentTarget)+1;n=S[t]??S[0];break}case"ArrowLeft":{const t=S.indexOf(e.currentTarget)-1;n=S[t]??S[S.length-1];break}}null==(t=n)||t.focus()};return a.createElement("div",{className:(0,r.Z)("tabs-container",p)},a.createElement("ul",{role:"tablist","aria-orientation":"horizontal",className:(0,r.Z)("tabs",{"tabs--block":o},k)},y.map((e=>{let{value:t,label:n,attributes:o}=e;return a.createElement("li",(0,l.Z)({role:"tab",tabIndex:w===t?0:-1,"aria-selected":w===t,key:t,ref:e=>S.push(e),onKeyDown:J,onClick:R},o,{className:(0,r.Z)("tabs__item",h,null==o?void 0:o.className,{"tabs__item--active":w===t})}),n??t)}))),n?(0,a.cloneElement)(v.filter((e=>e.props.value===w))[0],{className:"margin-top--md"}):a.createElement("div",{className:"margin-top--md"},v.map(((e,t)=>(0,a.cloneElement)(e,{key:t,hidden:e.props.value!==w})))))}function d(e){const t=(0,o.Z)();return a.createElement(c,(0,l.Z)({key:String(t)},e))}},86299:(e,t,n)=>{n.r(t),n.d(t,{assets:()=>p,contentTitle:()=>u,default:()=>d,frontMatter:()=>s,metadata:()=>i,toc:()=>h});var l=n(87462),a=(n(67294),n(3905)),r=n(65488),o=n(85162);const s={},u=void 0,i={unversionedId:"howtos/quick-start/cheat-sheets/json",id:"howtos/quick-start/cheat-sheets/json",title:"json",description:"{n.d(t,{Zo:()=>p,kt:()=>d});var l=n(67294);function a(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function r(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);t&&(l=l.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,l)}return n}function o(e){for(var t=1;t=0||(a[n]=e[n]);return a}(e,t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(l=0;l=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(a[n]=e[n])}return a}var i=l.createContext({}),u=function(e){var t=l.useContext(i),n=t;return e&&(n="function"==typeof e?e(t):o(o({},t),e)),n},p=function(e){var t=u(e.components);return l.createElement(i.Provider,{value:t},e.children)},h={inlineCode:"code",wrapper:function(e){var t=e.children;return l.createElement(l.Fragment,{},t)}},c=l.forwardRef((function(e,t){var n=e.components,a=e.mdxType,r=e.originalType,i=e.parentName,p=s(e,["components","mdxType","originalType","parentName"]),c=u(n),d=a,m=c["".concat(i,".").concat(d)]||c[d]||h[d]||r;return n?l.createElement(m,o(o({ref:t},p),{},{components:n})):l.createElement(m,o({ref:t},p))}));function d(e,t){var n=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var r=n.length,o=new Array(r);o[0]=c;var s={};for(var i in t)hasOwnProperty.call(t,i)&&(s[i]=t[i]);s.originalType=e,s.mdxType="string"==typeof e?e:a,o[1]=s;for(var u=2;u{n.d(t,{Z:()=>o});var l=n(67294),a=n(86010);const r="tabItem_Ymn6";function o(e){let{children:t,hidden:n,className:o}=e;return l.createElement("div",{role:"tabpanel",className:(0,a.Z)(r,o),hidden:n},t)}},65488:(e,t,n)=>{n.d(t,{Z:()=>d});var l=n(87462),a=n(67294),r=n(86010),o=n(72389),s=n(67392),i=n(7094),u=n(12466);const 
p="tabList__CuJ",h="tabItem_LNqP";function c(e){var t;const{lazy:n,block:o,defaultValue:c,values:d,groupId:m,className:k}=e,y=a.Children.map(e.children,(e=>{if((0,a.isValidElement)(e)&&"value"in e.props)return e;throw new Error(`Docusaurus error: Bad child <${"string"==typeof e.type?e.type:e.type.name}>: all children of the component should be , and every should have a unique "value" prop.`)})),N=d??y.map((e=>{let{props:{value:t,label:n,attributes:l}}=e;return{value:t,label:n,attributes:l}})),f=(0,s.l)(N,((e,t)=>e.value===t.value));if(f.length>0)throw new Error(`Docusaurus error: Duplicate values "${f.map((e=>e.value)).join(", ")}" found in . Every value needs to be unique.`);const v=null===c?c:c??(null==(t=y.find((e=>e.props.default)))?void 0:t.props.value)??y[0].props.value;if(null!==v&&!N.some((e=>e.value===v)))throw new Error(`Docusaurus error: The has a defaultValue "${v}" but none of its children has the corresponding value. Available values are: ${N.map((e=>e.value)).join(", ")}. If you intend to show no default tab, use defaultValue={null} instead.`);const{tabGroupChoices:O,setTabGroupChoices:S}=(0,i.U)(),[b,w]=(0,a.useState)(v),g=[],{blockElementScrollPositionUntilNextRender:J}=(0,u.o5)();if(null!=m){const e=O[m];null!=e&&e!==b&&N.some((t=>t.value===e))&&w(e)}const j=e=>{const t=e.currentTarget,n=g.indexOf(t),l=N[n].value;l!==b&&(J(t),w(l),null!=m&&S(m,String(l)))},R=e=>{var t;let n=null;switch(e.key){case"Enter":j(e);break;case"ArrowRight":{const t=g.indexOf(e.currentTarget)+1;n=g[t]??g[0];break}case"ArrowLeft":{const t=g.indexOf(e.currentTarget)-1;n=g[t]??g[g.length-1];break}}null==(t=n)||t.focus()};return a.createElement("div",{className:(0,r.Z)("tabs-container",p)},a.createElement("ul",{role:"tablist","aria-orientation":"horizontal",className:(0,r.Z)("tabs",{"tabs--block":o},k)},N.map((e=>{let{value:t,label:n,attributes:o}=e;return a.createElement("li",(0,l.Z)({role:"tab",tabIndex:b===t?0:-1,"aria-selected":b===t,key:t,ref:e=>g.push(e),onKeyDown:R,onClick:j},o,{className:(0,r.Z)("tabs__item",h,null==o?void 0:o.className,{"tabs__item--active":b===t})}),n??t)}))),n?(0,a.cloneElement)(y.filter((e=>e.props.value===b))[0],{className:"margin-top--md"}):a.createElement("div",{className:"margin-top--md"},y.map(((e,t)=>(0,a.cloneElement)(e,{key:t,hidden:e.props.value!==b})))))}function d(e){const t=(0,o.Z)();return a.createElement(c,(0,l.Z)({key:String(t)},e))}},86299:(e,t,n)=>{n.r(t),n.d(t,{assets:()=>p,contentTitle:()=>i,default:()=>d,frontMatter:()=>s,metadata:()=>u,toc:()=>h});var l=n(87462),a=(n(67294),n(3905)),r=n(65488),o=n(85162);const s={},i=void 0,u={unversionedId:"howtos/quick-start/cheat-sheets/json",id:"howtos/quick-start/cheat-sheets/json",title:"json",description:"{n.d(t,{Zo:()=>m,kt:()=>k});var l=n(67294);function r(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function s(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);t&&(l=l.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,l)}return n}function o(e){for(var t=1;t=0||(r[n]=e[n]);return r}(e,t);if(Object.getOwnPropertySymbols){var s=Object.getOwnPropertySymbols(e);for(l=0;l=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(r[n]=e[n])}return r}var u=l.createContext({}),i=function(e){var t=l.useContext(u),n=t;return e&&(n="function"==typeof e?e(t):o(o({},t),e)),n},m=function(e){var t=i(e.components);return 
l.createElement(u.Provider,{value:t},e.children)},c={inlineCode:"code",wrapper:function(e){var t=e.children;return l.createElement(l.Fragment,{},t)}},d=l.forwardRef((function(e,t){var n=e.components,r=e.mdxType,s=e.originalType,u=e.parentName,m=a(e,["components","mdxType","originalType","parentName"]),d=i(n),k=r,p=d["".concat(u,".").concat(k)]||d[k]||c[k]||s;return n?l.createElement(p,o(o({ref:t},m),{},{components:n})):l.createElement(p,o({ref:t},m))}));function k(e,t){var n=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var s=n.length,o=new Array(s);o[0]=d;var a={};for(var u in t)hasOwnProperty.call(t,u)&&(a[u]=t[u]);a.originalType=e,a.mdxType="string"==typeof e?e:r,o[1]=a;for(var i=2;i{n.d(t,{Z:()=>o});var l=n(67294),r=n(86010);const s="tabItem_Ymn6";function o(e){let{children:t,hidden:n,className:o}=e;return l.createElement("div",{role:"tabpanel",className:(0,r.Z)(s,o),hidden:n},t)}},65488:(e,t,n)=>{n.d(t,{Z:()=>k});var l=n(87462),r=n(67294),s=n(86010),o=n(72389),a=n(67392),u=n(7094),i=n(12466);const m="tabList__CuJ",c="tabItem_LNqP";function d(e){var t;const{lazy:n,block:o,defaultValue:d,values:k,groupId:p,className:y}=e,f=r.Children.map(e.children,(e=>{if((0,r.isValidElement)(e)&&"value"in e.props)return e;throw new Error(`Docusaurus error: Bad child <${"string"==typeof e.type?e.type:e.type.name}>: all children of the component should be , and every should have a unique "value" prop.`)})),b=k??f.map((e=>{let{props:{value:t,label:n,attributes:l}}=e;return{value:t,label:n,attributes:l}})),h=(0,a.l)(b,((e,t)=>e.value===t.value));if(h.length>0)throw new Error(`Docusaurus error: Duplicate values "${h.map((e=>e.value)).join(", ")}" found in . Every value needs to be unique.`);const S=null===d?d:d??(null==(t=f.find((e=>e.props.default)))?void 0:t.props.value)??f[0].props.value;if(null!==S&&!b.some((e=>e.value===S)))throw new Error(`Docusaurus error: The has a defaultValue "${S}" but none of its children has the corresponding value. Available values are: ${b.map((e=>e.value)).join(", ")}. 
If you intend to show no default tab, use defaultValue={null} instead.`);const{tabGroupChoices:v,setTabGroupChoices:g}=(0,u.U)(),[E,O]=(0,r.useState)(S),R=[],{blockElementScrollPositionUntilNextRender:D}=(0,i.o5)();if(null!=p){const e=v[p];null!=e&&e!==E&&b.some((t=>t.value===e))&&O(e)}const T=e=>{const t=e.currentTarget,n=R.indexOf(t),l=b[n].value;l!==E&&(D(t),O(l),null!=p&&g(p,String(l)))},w=e=>{var t;let n=null;switch(e.key){case"Enter":T(e);break;case"ArrowRight":{const t=R.indexOf(e.currentTarget)+1;n=R[t]??R[0];break}case"ArrowLeft":{const t=R.indexOf(e.currentTarget)-1;n=R[t]??R[R.length-1];break}}null==(t=n)||t.focus()};return r.createElement("div",{className:(0,s.Z)("tabs-container",m)},r.createElement("ul",{role:"tablist","aria-orientation":"horizontal",className:(0,s.Z)("tabs",{"tabs--block":o},y)},b.map((e=>{let{value:t,label:n,attributes:o}=e;return r.createElement("li",(0,l.Z)({role:"tab",tabIndex:E===t?0:-1,"aria-selected":E===t,key:t,ref:e=>R.push(e),onKeyDown:w,onClick:T},o,{className:(0,s.Z)("tabs__item",c,null==o?void 0:o.className,{"tabs__item--active":E===t})}),n??t)}))),n?(0,r.cloneElement)(f.filter((e=>e.props.value===E))[0],{className:"margin-top--md"}):r.createElement("div",{className:"margin-top--md"},f.map(((e,t)=>(0,r.cloneElement)(e,{key:t,hidden:e.props.value!==E})))))}function k(e){const t=(0,o.Z)();return r.createElement(d,(0,l.Z)({key:String(t)},e))}},93880:(e,t,n)=>{n.r(t),n.d(t,{assets:()=>m,contentTitle:()=>u,default:()=>k,frontMatter:()=>a,metadata:()=>i,toc:()=>c});var l=n(87462),r=(n(67294),n(3905)),s=n(65488),o=n(85162);const a={},u=void 0,i={unversionedId:"howtos/quick-start/cheat-sheets/sets",id:"howtos/quick-start/cheat-sheets/sets",title:"sets",description:"{n.d(t,{Zo:()=>m,kt:()=>p});var l=n(67294);function r(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function s(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);t&&(l=l.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,l)}return n}function a(e){for(var t=1;t=0||(r[n]=e[n]);return r}(e,t);if(Object.getOwnPropertySymbols){var s=Object.getOwnPropertySymbols(e);for(l=0;l=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(r[n]=e[n])}return r}var u=l.createContext({}),i=function(e){var t=l.useContext(u),n=t;return e&&(n="function"==typeof e?e(t):a(a({},t),e)),n},m=function(e){var t=i(e.components);return l.createElement(u.Provider,{value:t},e.children)},d={inlineCode:"code",wrapper:function(e){var t=e.children;return l.createElement(l.Fragment,{},t)}},c=l.forwardRef((function(e,t){var n=e.components,r=e.mdxType,s=e.originalType,u=e.parentName,m=o(e,["components","mdxType","originalType","parentName"]),c=i(n),p=r,k=c["".concat(u,".").concat(p)]||c[p]||d[p]||s;return n?l.createElement(k,a(a({ref:t},m),{},{components:n})):l.createElement(k,a({ref:t},m))}));function p(e,t){var n=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var s=n.length,a=new Array(s);a[0]=c;var o={};for(var u in t)hasOwnProperty.call(t,u)&&(o[u]=t[u]);o.originalType=e,o.mdxType="string"==typeof e?e:r,a[1]=o;for(var i=2;i{n.d(t,{Z:()=>a});var l=n(67294),r=n(86010);const s="tabItem_Ymn6";function a(e){let{children:t,hidden:n,className:a}=e;return l.createElement("div",{role:"tabpanel",className:(0,r.Z)(s,a),hidden:n},t)}},65488:(e,t,n)=>{n.d(t,{Z:()=>p});var l=n(87462),r=n(67294),s=n(86010),a=n(72389),o=n(67392),u=n(7094),i=n(12466);const 
m="tabList__CuJ",d="tabItem_LNqP";function c(e){var t;const{lazy:n,block:a,defaultValue:c,values:p,groupId:k,className:y}=e,b=r.Children.map(e.children,(e=>{if((0,r.isValidElement)(e)&&"value"in e.props)return e;throw new Error(`Docusaurus error: Bad child <${"string"==typeof e.type?e.type:e.type.name}>: all children of the component should be , and every should have a unique "value" prop.`)})),f=p??b.map((e=>{let{props:{value:t,label:n,attributes:l}}=e;return{value:t,label:n,attributes:l}})),h=(0,o.l)(f,((e,t)=>e.value===t.value));if(h.length>0)throw new Error(`Docusaurus error: Duplicate values "${h.map((e=>e.value)).join(", ")}" found in . Every value needs to be unique.`);const S=null===c?c:c??(null==(t=b.find((e=>e.props.default)))?void 0:t.props.value)??b[0].props.value;if(null!==S&&!f.some((e=>e.value===S)))throw new Error(`Docusaurus error: The has a defaultValue "${S}" but none of its children has the corresponding value. Available values are: ${f.map((e=>e.value)).join(", ")}. If you intend to show no default tab, use defaultValue={null} instead.`);const{tabGroupChoices:v,setTabGroupChoices:g}=(0,u.U)(),[O,D]=(0,r.useState)(S),E=[],{blockElementScrollPositionUntilNextRender:T}=(0,i.o5)();if(null!=k){const e=v[k];null!=e&&e!==O&&f.some((t=>t.value===e))&&D(e)}const R=e=>{const t=e.currentTarget,n=E.indexOf(t),l=f[n].value;l!==O&&(T(t),D(l),null!=k&&g(k,String(l)))},w=e=>{var t;let n=null;switch(e.key){case"Enter":R(e);break;case"ArrowRight":{const t=E.indexOf(e.currentTarget)+1;n=E[t]??E[0];break}case"ArrowLeft":{const t=E.indexOf(e.currentTarget)-1;n=E[t]??E[E.length-1];break}}null==(t=n)||t.focus()};return r.createElement("div",{className:(0,s.Z)("tabs-container",m)},r.createElement("ul",{role:"tablist","aria-orientation":"horizontal",className:(0,s.Z)("tabs",{"tabs--block":a},y)},f.map((e=>{let{value:t,label:n,attributes:a}=e;return r.createElement("li",(0,l.Z)({role:"tab",tabIndex:O===t?0:-1,"aria-selected":O===t,key:t,ref:e=>E.push(e),onKeyDown:w,onClick:R},a,{className:(0,s.Z)("tabs__item",d,null==a?void 0:a.className,{"tabs__item--active":O===t})}),n??t)}))),n?(0,r.cloneElement)(b.filter((e=>e.props.value===O))[0],{className:"margin-top--md"}):r.createElement("div",{className:"margin-top--md"},b.map(((e,t)=>(0,r.cloneElement)(e,{key:t,hidden:e.props.value!==O})))))}function p(e){const t=(0,a.Z)();return r.createElement(c,(0,l.Z)({key:String(t)},e))}},93880:(e,t,n)=>{n.r(t),n.d(t,{assets:()=>m,contentTitle:()=>u,default:()=>p,frontMatter:()=>o,metadata:()=>i,toc:()=>d});var l=n(87462),r=(n(67294),n(3905)),s=n(65488),a=n(85162);const o={},u=void 0,i={unversionedId:"howtos/quick-start/cheat-sheets/sets",id:"howtos/quick-start/cheat-sheets/sets",title:"sets",description:"{r.d(t,{Zo:()=>u,kt:()=>m});var n=r(67294);function a(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function o(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function l(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(a[r]=e[r])}return a}var i=n.createContext({}),c=function(e){var t=n.useContext(i),r=t;return e&&(r="function"==typeof e?e(t):l(l({},t),e)),r},u=function(e){var t=c(e.components);return 
n.createElement(i.Provider,{value:t},e.children)},d={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},p=n.forwardRef((function(e,t){var r=e.components,a=e.mdxType,o=e.originalType,i=e.parentName,u=s(e,["components","mdxType","originalType","parentName"]),p=c(r),m=a,h=p["".concat(i,".").concat(m)]||p[m]||d[m]||o;return r?n.createElement(h,l(l({ref:t},u),{},{components:r})):n.createElement(h,l({ref:t},u))}));function m(e,t){var r=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=r.length,l=new Array(o);l[0]=p;var s={};for(var i in t)hasOwnProperty.call(t,i)&&(s[i]=t[i]);s.originalType=e,s.mdxType="string"==typeof e?e:a,l[1]=s;for(var c=2;c{r.d(t,{Z:()=>l});var n=r(67294),a=r(86010);const o="tabItem_Ymn6";function l(e){let{children:t,hidden:r,className:l}=e;return n.createElement("div",{role:"tabpanel",className:(0,a.Z)(o,l),hidden:r},t)}},65488:(e,t,r)=>{r.d(t,{Z:()=>m});var n=r(87462),a=r(67294),o=r(86010),l=r(72389),s=r(67392),i=r(7094),c=r(12466);const u="tabList__CuJ",d="tabItem_LNqP";function p(e){var t;const{lazy:r,block:l,defaultValue:p,values:m,groupId:h,className:f}=e,b=a.Children.map(e.children,(e=>{if((0,a.isValidElement)(e)&&"value"in e.props)return e;throw new Error(`Docusaurus error: Bad child <${"string"==typeof e.type?e.type:e.type.name}>: all children of the component should be , and every should have a unique "value" prop.`)})),v=m??b.map((e=>{let{props:{value:t,label:r,attributes:n}}=e;return{value:t,label:r,attributes:n}})),y=(0,s.l)(v,((e,t)=>e.value===t.value));if(y.length>0)throw new Error(`Docusaurus error: Duplicate values "${y.map((e=>e.value)).join(", ")}" found in . Every value needs to be unique.`);const g=null===p?p:p??(null==(t=b.find((e=>e.props.default)))?void 0:t.props.value)??b[0].props.value;if(null!==g&&!v.some((e=>e.value===g)))throw new Error(`Docusaurus error: The has a defaultValue "${g}" but none of its children has the corresponding value. Available values are: ${v.map((e=>e.value)).join(", ")}. 
If you intend to show no default tab, use defaultValue={null} instead.`);const{tabGroupChoices:k,setTabGroupChoices:w}=(0,i.U)(),[E,T]=(0,a.useState)(g),O=[],{blockElementScrollPositionUntilNextRender:I}=(0,c.o5)();if(null!=h){const e=k[h];null!=e&&e!==E&&v.some((t=>t.value===e))&&T(e)}const N=e=>{const t=e.currentTarget,r=O.indexOf(t),n=v[r].value;n!==E&&(I(t),T(n),null!=h&&w(h,String(n)))},x=e=>{var t;let r=null;switch(e.key){case"Enter":N(e);break;case"ArrowRight":{const t=O.indexOf(e.currentTarget)+1;r=O[t]??O[0];break}case"ArrowLeft":{const t=O.indexOf(e.currentTarget)-1;r=O[t]??O[O.length-1];break}}null==(t=r)||t.focus()};return a.createElement("div",{className:(0,o.Z)("tabs-container",u)},a.createElement("ul",{role:"tablist","aria-orientation":"horizontal",className:(0,o.Z)("tabs",{"tabs--block":l},f)},v.map((e=>{let{value:t,label:r,attributes:l}=e;return a.createElement("li",(0,n.Z)({role:"tab",tabIndex:E===t?0:-1,"aria-selected":E===t,key:t,ref:e=>O.push(e),onKeyDown:x,onClick:N},l,{className:(0,o.Z)("tabs__item",d,null==l?void 0:l.className,{"tabs__item--active":E===t})}),r??t)}))),r?(0,a.cloneElement)(b.filter((e=>e.props.value===E))[0],{className:"margin-top--md"}):a.createElement("div",{className:"margin-top--md"},b.map(((e,t)=>(0,a.cloneElement)(e,{key:t,hidden:e.props.value!==E})))))}function m(e){const t=(0,l.Z)();return a.createElement(p,(0,n.Z)({key:String(t)},e))}},64500:(e,t,r)=>{r.r(t),r.d(t,{assets:()=>u,contentTitle:()=>i,default:()=>m,frontMatter:()=>s,metadata:()=>c,toc:()=>d});var n=r(87462),a=(r(67294),r(3905)),o=r(65488),l=r(85162);const s={},i=void 0,c={unversionedId:"howtos/quick-start/cheat-sheets/connect",id:"howtos/quick-start/cheat-sheets/connect",title:"connect",description:" redis-cli\n\n"))),(0,a.kt)(l.Z,{value:"REDIS_INSIGHT",mdxType:"TabItem"},(0,a.kt)("p",null,"Download ",(0,a.kt)("u",null,(0,a.kt)("a",{parentName:"p",href:"https://redis.com/redis-enterprise/redis-insight/"},"RedisInsight"))," to visually explore your Redis data or to engage with raw Redis commands in the workbench. 
Dive deeper into RedisInsight with these ",(0,a.kt)("u",null,(0,a.kt)("a",{parentName:"p",href:"/explore/redisinsight/"},"tutorials")),"."),(0,a.kt)("p",null,(0,a.kt)("img",{alt:"redis-insight-connect",src:r(40083).Z,width:"1896",height:"954"}))),(0,a.kt)(l.Z,{value:"NODE_JS",mdxType:"TabItem"},(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-js"},"import { createClient } from 'redis';\n\nlet client = createClient({ url: 'redis://localhost:6379' });\n\nawait client.connect();\n\n//await client.set('key', 'value');\n\nawait client.disconnect();\n")))))}m.isMDXComponent=!0},40083:(e,t,r)=>{r.d(t,{Z:()=>n});const n=r.p+"assets/images/redis-insight-connect-ca60cd0282c3a317e5608da154f94867.png"}}]); \ No newline at end of file diff --git a/assets/js/872db8be.cbbddffb.js b/assets/js/872db8be.cbbddffb.js new file mode 100644 index 0000000000..e2f9268a3b --- /dev/null +++ b/assets/js/872db8be.cbbddffb.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunkredis_developer_hub=self.webpackChunkredis_developer_hub||[]).push([[1309],{3905:(e,t,n)=>{n.d(t,{Zo:()=>u,kt:()=>m});var r=n(67294);function a(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function l(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function o(e){for(var t=1;t=0||(a[n]=e[n]);return a}(e,t);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(a[n]=e[n])}return a}var i=r.createContext({}),c=function(e){var t=r.useContext(i),n=t;return e&&(n="function"==typeof e?e(t):o(o({},t),e)),n},u=function(e){var t=c(e.components);return r.createElement(i.Provider,{value:t},e.children)},d={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},p=r.forwardRef((function(e,t){var n=e.components,a=e.mdxType,l=e.originalType,i=e.parentName,u=s(e,["components","mdxType","originalType","parentName"]),p=c(n),m=a,h=p["".concat(i,".").concat(m)]||p[m]||d[m]||l;return n?r.createElement(h,o(o({ref:t},u),{},{components:n})):r.createElement(h,o({ref:t},u))}));function m(e,t){var n=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var l=n.length,o=new Array(l);o[0]=p;var s={};for(var i in t)hasOwnProperty.call(t,i)&&(s[i]=t[i]);s.originalType=e,s.mdxType="string"==typeof e?e:a,o[1]=s;for(var c=2;c{n.d(t,{Z:()=>o});var r=n(67294),a=n(86010);const l="tabItem_Ymn6";function o(e){let{children:t,hidden:n,className:o}=e;return r.createElement("div",{role:"tabpanel",className:(0,a.Z)(l,o),hidden:n},t)}},65488:(e,t,n)=>{n.d(t,{Z:()=>m});var r=n(87462),a=n(67294),l=n(86010),o=n(72389),s=n(67392),i=n(7094),c=n(12466);const u="tabList__CuJ",d="tabItem_LNqP";function p(e){var t;const{lazy:n,block:o,defaultValue:p,values:m,groupId:h,className:b}=e,v=a.Children.map(e.children,(e=>{if((0,a.isValidElement)(e)&&"value"in e.props)return e;throw new Error(`Docusaurus error: Bad child <${"string"==typeof e.type?e.type:e.type.name}>: all children of the component should be , and every should have a unique "value" prop.`)})),f=m??v.map((e=>{let{props:{value:t,label:n,attributes:r}}=e;return{value:t,label:n,attributes:r}})),y=(0,s.l)(f,((e,t)=>e.value===t.value));if(y.length>0)throw new Error(`Docusaurus error: Duplicate values "${y.map((e=>e.value)).join(", ")}" found in . 
Every value needs to be unique.`);const g=null===p?p:p??(null==(t=v.find((e=>e.props.default)))?void 0:t.props.value)??v[0].props.value;if(null!==g&&!f.some((e=>e.value===g)))throw new Error(`Docusaurus error: The has a defaultValue "${g}" but none of its children has the corresponding value. Available values are: ${f.map((e=>e.value)).join(", ")}. If you intend to show no default tab, use defaultValue={null} instead.`);const{tabGroupChoices:k,setTabGroupChoices:w}=(0,i.U)(),[T,E]=(0,a.useState)(g),N=[],{blockElementScrollPositionUntilNextRender:O}=(0,c.o5)();if(null!=h){const e=k[h];null!=e&&e!==T&&f.some((t=>t.value===e))&&E(e)}const I=e=>{const t=e.currentTarget,n=N.indexOf(t),r=f[n].value;r!==T&&(O(t),E(r),null!=h&&w(h,String(r)))},x=e=>{var t;let n=null;switch(e.key){case"Enter":I(e);break;case"ArrowRight":{const t=N.indexOf(e.currentTarget)+1;n=N[t]??N[0];break}case"ArrowLeft":{const t=N.indexOf(e.currentTarget)-1;n=N[t]??N[N.length-1];break}}null==(t=n)||t.focus()};return a.createElement("div",{className:(0,l.Z)("tabs-container",u)},a.createElement("ul",{role:"tablist","aria-orientation":"horizontal",className:(0,l.Z)("tabs",{"tabs--block":o},b)},f.map((e=>{let{value:t,label:n,attributes:o}=e;return a.createElement("li",(0,r.Z)({role:"tab",tabIndex:T===t?0:-1,"aria-selected":T===t,key:t,ref:e=>N.push(e),onKeyDown:x,onClick:I},o,{className:(0,l.Z)("tabs__item",d,null==o?void 0:o.className,{"tabs__item--active":T===t})}),n??t)}))),n?(0,a.cloneElement)(v.filter((e=>e.props.value===T))[0],{className:"margin-top--md"}):a.createElement("div",{className:"margin-top--md"},v.map(((e,t)=>(0,a.cloneElement)(e,{key:t,hidden:e.props.value!==T})))))}function m(e){const t=(0,o.Z)();return a.createElement(p,(0,r.Z)({key:String(t)},e))}},64500:(e,t,n)=>{n.r(t),n.d(t,{assets:()=>u,contentTitle:()=>i,default:()=>m,frontMatter:()=>s,metadata:()=>c,toc:()=>d});var r=n(87462),a=(n(67294),n(3905)),l=n(65488),o=n(85162);const s={},i=void 0,c={unversionedId:"howtos/quick-start/cheat-sheets/connect",id:"howtos/quick-start/cheat-sheets/connect",title:"connect",description:" redis-cli\n\n"))),(0,a.kt)(o.Z,{value:"REDIS_INSIGHT",mdxType:"TabItem"},(0,a.kt)("p",null,"Download ",(0,a.kt)("u",null,(0,a.kt)("a",{parentName:"p",href:"https://redis.com/redis-enterprise/redis-insight/"},"RedisInsight"))," to visually explore your Redis data or to engage with raw Redis commands in the workbench. 
Dive deeper into RedisInsight with these ",(0,a.kt)("u",null,(0,a.kt)("a",{parentName:"p",href:"/explore/redisinsight/"},"tutorials")),"."),(0,a.kt)("p",null,(0,a.kt)("img",{alt:"redis-insight-connect",src:n(40083).Z,width:"1896",height:"954"}))),(0,a.kt)(o.Z,{value:"NODE_JS",mdxType:"TabItem"},(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-js"},"import { createClient } from 'redis';\n\nlet client = createClient({ url: 'redis://localhost:6379' });\n\nawait client.connect();\n\n//await client.set('key', 'value');\n\nawait client.disconnect();\n"))),(0,a.kt)(o.Z,{value:"PYTHON",mdxType:"TabItem"},(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-python"},"import redis\n\nr = redis.Redis(host='localhost', port=6379, db=0)\n"))),(0,a.kt)(o.Z,{value:"C#",mdxType:"TabItem"},(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-csharp"},'using StackExchange.Redis;\n\nConnectionMultiplexer redis = ConnectionMultiplexer.Connect("localhost");\nIDatabase db = redis.GetDatabase();\n'))),(0,a.kt)(o.Z,{value:"JAVA",mdxType:"TabItem"},(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-java"},'import redis.clients.jedis.JedisPooled;\n\nJedisPooled jedis = new JedisPooled("localhost", 6379);\n')))))}m.isMDXComponent=!0},40083:(e,t,n)=>{n.d(t,{Z:()=>r});const r=n.p+"assets/images/redis-insight-connect-ca60cd0282c3a317e5608da154f94867.png"}}]); \ No newline at end of file diff --git a/assets/js/94f8b490.ab3ddf7d.js b/assets/js/94f8b490.ab3ddf7d.js deleted file mode 100644 index b904af2abe..0000000000 --- a/assets/js/94f8b490.ab3ddf7d.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunkredis_developer_hub=self.webpackChunkredis_developer_hub||[]).push([[9045],{3905:(e,t,n)=>{n.d(t,{Zo:()=>c,kt:()=>p});var r=n(67294);function l(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function a(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function s(e){for(var t=1;t=0||(l[n]=e[n]);return l}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(l[n]=e[n])}return l}var i=r.createContext({}),u=function(e){var t=r.useContext(i),n=t;return e&&(n="function"==typeof e?e(t):s(s({},t),e)),n},c=function(e){var t=u(e.components);return r.createElement(i.Provider,{value:t},e.children)},d={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},m=r.forwardRef((function(e,t){var n=e.components,l=e.mdxType,a=e.originalType,i=e.parentName,c=o(e,["components","mdxType","originalType","parentName"]),m=u(n),p=l,k=m["".concat(i,".").concat(p)]||m[p]||d[p]||a;return n?r.createElement(k,s(s({ref:t},c),{},{components:n})):r.createElement(k,s({ref:t},c))}));function p(e,t){var n=arguments,l=t&&t.mdxType;if("string"==typeof e||l){var a=n.length,s=new Array(a);s[0]=m;var o={};for(var i in t)hasOwnProperty.call(t,i)&&(o[i]=t[i]);o.originalType=e,o.mdxType="string"==typeof e?e:l,s[1]=o;for(var u=2;u{n.d(t,{Z:()=>s});var r=n(67294),l=n(86010);const a="tabItem_Ymn6";function s(e){let{children:t,hidden:n,className:s}=e;return r.createElement("div",{role:"tabpanel",className:(0,l.Z)(a,s),hidden:n},t)}},65488:(e,t,n)=>{n.d(t,{Z:()=>p});var 
r=n(87462),l=n(67294),a=n(86010),s=n(72389),o=n(67392),i=n(7094),u=n(12466);const c="tabList__CuJ",d="tabItem_LNqP";function m(e){var t;const{lazy:n,block:s,defaultValue:m,values:p,groupId:k,className:h}=e,f=l.Children.map(e.children,(e=>{if((0,l.isValidElement)(e)&&"value"in e.props)return e;throw new Error(`Docusaurus error: Bad child <${"string"==typeof e.type?e.type:e.type.name}>: all children of the component should be , and every should have a unique "value" prop.`)})),y=p??f.map((e=>{let{props:{value:t,label:n,attributes:r}}=e;return{value:t,label:n,attributes:r}})),b=(0,o.l)(y,((e,t)=>e.value===t.value));if(b.length>0)throw new Error(`Docusaurus error: Duplicate values "${b.map((e=>e.value)).join(", ")}" found in . Every value needs to be unique.`);const g=null===m?m:m??(null==(t=f.find((e=>e.props.default)))?void 0:t.props.value)??f[0].props.value;if(null!==g&&!y.some((e=>e.value===g)))throw new Error(`Docusaurus error: The has a defaultValue "${g}" but none of its children has the corresponding value. Available values are: ${y.map((e=>e.value)).join(", ")}. If you intend to show no default tab, use defaultValue={null} instead.`);const{tabGroupChoices:v,setTabGroupChoices:O}=(0,i.U)(),[w,D]=(0,l.useState)(g),E=[],{blockElementScrollPositionUntilNextRender:T}=(0,u.o5)();if(null!=k){const e=v[k];null!=e&&e!==w&&y.some((t=>t.value===e))&&D(e)}const N=e=>{const t=e.currentTarget,n=E.indexOf(t),r=y[n].value;r!==w&&(T(t),D(r),null!=k&&O(k,String(r)))},R=e=>{var t;let n=null;switch(e.key){case"Enter":N(e);break;case"ArrowRight":{const t=E.indexOf(e.currentTarget)+1;n=E[t]??E[0];break}case"ArrowLeft":{const t=E.indexOf(e.currentTarget)-1;n=E[t]??E[E.length-1];break}}null==(t=n)||t.focus()};return l.createElement("div",{className:(0,a.Z)("tabs-container",c)},l.createElement("ul",{role:"tablist","aria-orientation":"horizontal",className:(0,a.Z)("tabs",{"tabs--block":s},h)},y.map((e=>{let{value:t,label:n,attributes:s}=e;return l.createElement("li",(0,r.Z)({role:"tab",tabIndex:w===t?0:-1,"aria-selected":w===t,key:t,ref:e=>E.push(e),onKeyDown:R,onClick:N},s,{className:(0,a.Z)("tabs__item",d,null==s?void 0:s.className,{"tabs__item--active":w===t})}),n??t)}))),n?(0,l.cloneElement)(f.filter((e=>e.props.value===w))[0],{className:"margin-top--md"}):l.createElement("div",{className:"margin-top--md"},f.map(((e,t)=>(0,l.cloneElement)(e,{key:t,hidden:e.props.value!==w})))))}function p(e){const t=(0,s.Z)();return l.createElement(m,(0,r.Z)({key:String(t)},e))}},135:(e,t,n)=>{n.r(t),n.d(t,{assets:()=>c,contentTitle:()=>i,default:()=>p,frontMatter:()=>o,metadata:()=>u,toc:()=>d});var r=n(87462),l=(n(67294),n(3905)),a=n(65488),s=n(85162);const o={},i=void 0,u={unversionedId:"howtos/quick-start/cheat-sheets/streams",id:"howtos/quick-start/cheat-sheets/streams",title:"streams",description:"{n.d(t,{Zo:()=>m,kt:()=>p});var r=n(67294);function a(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function l(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function s(e){for(var t=1;t=0||(a[n]=e[n]);return a}(e,t);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(a[n]=e[n])}return a}var o=r.createContext({}),u=function(e){var t=r.useContext(o),n=t;return e&&(n="function"==typeof 
e?e(t):s(s({},t),e)),n},m=function(e){var t=u(e.components);return r.createElement(o.Provider,{value:t},e.children)},d={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},c=r.forwardRef((function(e,t){var n=e.components,a=e.mdxType,l=e.originalType,o=e.parentName,m=i(e,["components","mdxType","originalType","parentName"]),c=u(n),p=a,y=c["".concat(o,".").concat(p)]||c[p]||d[p]||l;return n?r.createElement(y,s(s({ref:t},m),{},{components:n})):r.createElement(y,s({ref:t},m))}));function p(e,t){var n=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var l=n.length,s=new Array(l);s[0]=c;var i={};for(var o in t)hasOwnProperty.call(t,o)&&(i[o]=t[o]);i.originalType=e,i.mdxType="string"==typeof e?e:a,s[1]=i;for(var u=2;u{n.d(t,{Z:()=>s});var r=n(67294),a=n(86010);const l="tabItem_Ymn6";function s(e){let{children:t,hidden:n,className:s}=e;return r.createElement("div",{role:"tabpanel",className:(0,a.Z)(l,s),hidden:n},t)}},65488:(e,t,n)=>{n.d(t,{Z:()=>p});var r=n(87462),a=n(67294),l=n(86010),s=n(72389),i=n(67392),o=n(7094),u=n(12466);const m="tabList__CuJ",d="tabItem_LNqP";function c(e){var t;const{lazy:n,block:s,defaultValue:c,values:p,groupId:y,className:h}=e,k=a.Children.map(e.children,(e=>{if((0,a.isValidElement)(e)&&"value"in e.props)return e;throw new Error(`Docusaurus error: Bad child <${"string"==typeof e.type?e.type:e.type.name}>: all children of the component should be , and every should have a unique "value" prop.`)})),f=p??k.map((e=>{let{props:{value:t,label:n,attributes:r}}=e;return{value:t,label:n,attributes:r}})),b=(0,i.l)(f,((e,t)=>e.value===t.value));if(b.length>0)throw new Error(`Docusaurus error: Duplicate values "${b.map((e=>e.value)).join(", ")}" found in . Every value needs to be unique.`);const g=null===c?c:c??(null==(t=k.find((e=>e.props.default)))?void 0:t.props.value)??k[0].props.value;if(null!==g&&!f.some((e=>e.value===g)))throw new Error(`Docusaurus error: The has a defaultValue "${g}" but none of its children has the corresponding value. Available values are: ${f.map((e=>e.value)).join(", ")}. 
If you intend to show no default tab, use defaultValue={null} instead.`);const{tabGroupChoices:v,setTabGroupChoices:N}=(0,o.U)(),[D,E]=(0,a.useState)(g),w=[],{blockElementScrollPositionUntilNextRender:O}=(0,u.o5)();if(null!=y){const e=v[y];null!=e&&e!==D&&f.some((t=>t.value===e))&&E(e)}const T=e=>{const t=e.currentTarget,n=w.indexOf(t),r=f[n].value;r!==D&&(O(t),E(r),null!=y&&N(y,String(r)))},R=e=>{var t;let n=null;switch(e.key){case"Enter":T(e);break;case"ArrowRight":{const t=w.indexOf(e.currentTarget)+1;n=w[t]??w[0];break}case"ArrowLeft":{const t=w.indexOf(e.currentTarget)-1;n=w[t]??w[w.length-1];break}}null==(t=n)||t.focus()};return a.createElement("div",{className:(0,l.Z)("tabs-container",m)},a.createElement("ul",{role:"tablist","aria-orientation":"horizontal",className:(0,l.Z)("tabs",{"tabs--block":s},h)},f.map((e=>{let{value:t,label:n,attributes:s}=e;return a.createElement("li",(0,r.Z)({role:"tab",tabIndex:D===t?0:-1,"aria-selected":D===t,key:t,ref:e=>w.push(e),onKeyDown:R,onClick:T},s,{className:(0,l.Z)("tabs__item",d,null==s?void 0:s.className,{"tabs__item--active":D===t})}),n??t)}))),n?(0,a.cloneElement)(k.filter((e=>e.props.value===D))[0],{className:"margin-top--md"}):a.createElement("div",{className:"margin-top--md"},k.map(((e,t)=>(0,a.cloneElement)(e,{key:t,hidden:e.props.value!==D})))))}function p(e){const t=(0,s.Z)();return a.createElement(c,(0,r.Z)({key:String(t)},e))}},135:(e,t,n)=>{n.r(t),n.d(t,{assets:()=>m,contentTitle:()=>o,default:()=>p,frontMatter:()=>i,metadata:()=>u,toc:()=>d});var r=n(87462),a=(n(67294),n(3905)),l=n(65488),s=n(85162);const i={},o=void 0,u={unversionedId:"howtos/quick-start/cheat-sheets/streams",id:"howtos/quick-start/cheat-sheets/streams",title:"streams",description:"{l.d(t,{Zo:()=>c,kt:()=>h});var n=l(67294);function a(e,t,l){return t in e?Object.defineProperty(e,t,{value:l,enumerable:!0,configurable:!0,writable:!0}):e[t]=l,e}function r(e,t){var l=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),l.push.apply(l,n)}return l}function o(e){for(var t=1;t=0||(a[l]=e[l]);return a}(e,t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,l)&&(a[l]=e[l])}return a}var i=n.createContext({}),u=function(e){var t=n.useContext(i),l=t;return e&&(l="function"==typeof e?e(t):o(o({},t),e)),l},c=function(e){var t=u(e.components);return n.createElement(i.Provider,{value:t},e.children)},d={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},p=n.forwardRef((function(e,t){var l=e.components,a=e.mdxType,r=e.originalType,i=e.parentName,c=s(e,["components","mdxType","originalType","parentName"]),p=u(l),h=a,m=p["".concat(i,".").concat(h)]||p[h]||d[h]||r;return l?n.createElement(m,o(o({ref:t},c),{},{components:l})):n.createElement(m,o({ref:t},c))}));function h(e,t){var l=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var r=l.length,o=new Array(r);o[0]=p;var s={};for(var i in t)hasOwnProperty.call(t,i)&&(s[i]=t[i]);s.originalType=e,s.mdxType="string"==typeof e?e:a,o[1]=s;for(var u=2;u{l.d(t,{Z:()=>o});var n=l(67294),a=l(86010);const r="tabItem_Ymn6";function o(e){let{children:t,hidden:l,className:o}=e;return n.createElement("div",{role:"tabpanel",className:(0,a.Z)(r,o),hidden:l},t)}},65488:(e,t,l)=>{l.d(t,{Z:()=>h});var n=l(87462),a=l(67294),r=l(86010),o=l(72389),s=l(67392),i=l(7094),u=l(12466);const 
c="tabList__CuJ",d="tabItem_LNqP";function p(e){var t;const{lazy:l,block:o,defaultValue:p,values:h,groupId:m,className:f}=e,k=a.Children.map(e.children,(e=>{if((0,a.isValidElement)(e)&&"value"in e.props)return e;throw new Error(`Docusaurus error: Bad child <${"string"==typeof e.type?e.type:e.type.name}>: all children of the component should be , and every should have a unique "value" prop.`)})),y=h??k.map((e=>{let{props:{value:t,label:l,attributes:n}}=e;return{value:t,label:l,attributes:n}})),v=(0,s.l)(y,((e,t)=>e.value===t.value));if(v.length>0)throw new Error(`Docusaurus error: Duplicate values "${v.map((e=>e.value)).join(", ")}" found in . Every value needs to be unique.`);const b=null===p?p:p??(null==(t=k.find((e=>e.props.default)))?void 0:t.props.value)??k[0].props.value;if(null!==b&&!y.some((e=>e.value===b)))throw new Error(`Docusaurus error: The has a defaultValue "${b}" but none of its children has the corresponding value. Available values are: ${y.map((e=>e.value)).join(", ")}. If you intend to show no default tab, use defaultValue={null} instead.`);const{tabGroupChoices:g,setTabGroupChoices:T}=(0,i.U)(),[E,w]=(0,a.useState)(b),O=[],{blockElementScrollPositionUntilNextRender:N}=(0,u.o5)();if(null!=m){const e=g[m];null!=e&&e!==E&&y.some((t=>t.value===e))&&w(e)}const _=e=>{const t=e.currentTarget,l=O.indexOf(t),n=y[l].value;n!==E&&(N(t),w(n),null!=m&&T(m,String(n)))},x=e=>{var t;let l=null;switch(e.key){case"Enter":_(e);break;case"ArrowRight":{const t=O.indexOf(e.currentTarget)+1;l=O[t]??O[0];break}case"ArrowLeft":{const t=O.indexOf(e.currentTarget)-1;l=O[t]??O[O.length-1];break}}null==(t=l)||t.focus()};return a.createElement("div",{className:(0,r.Z)("tabs-container",c)},a.createElement("ul",{role:"tablist","aria-orientation":"horizontal",className:(0,r.Z)("tabs",{"tabs--block":o},f)},y.map((e=>{let{value:t,label:l,attributes:o}=e;return a.createElement("li",(0,n.Z)({role:"tab",tabIndex:E===t?0:-1,"aria-selected":E===t,key:t,ref:e=>O.push(e),onKeyDown:x,onClick:_},o,{className:(0,r.Z)("tabs__item",d,null==o?void 0:o.className,{"tabs__item--active":E===t})}),l??t)}))),l?(0,a.cloneElement)(k.filter((e=>e.props.value===E))[0],{className:"margin-top--md"}):a.createElement("div",{className:"margin-top--md"},k.map(((e,t)=>(0,a.cloneElement)(e,{key:t,hidden:e.props.value!==E})))))}function h(e){const t=(0,o.Z)();return a.createElement(p,(0,n.Z)({key:String(t)},e))}},82749:(e,t,l)=>{l.r(t),l.d(t,{assets:()=>c,contentTitle:()=>i,default:()=>h,frontMatter:()=>s,metadata:()=>u,toc:()=>d});var n=l(87462),a=(l(67294),l(3905)),r=l(65488),o=l(85162);const s={},i=void 0,u={unversionedId:"howtos/quick-start/cheat-sheets/hashes",id:"howtos/quick-start/cheat-sheets/hashes",title:"hashes",description:"{l.d(t,{Zo:()=>c,kt:()=>p});var n=l(67294);function a(e,t,l){return t in e?Object.defineProperty(e,t,{value:l,enumerable:!0,configurable:!0,writable:!0}):e[t]=l,e}function r(e,t){var l=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),l.push.apply(l,n)}return l}function s(e){for(var t=1;t=0||(a[l]=e[l]);return a}(e,t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,l)&&(a[l]=e[l])}return a}var i=n.createContext({}),u=function(e){var t=n.useContext(i),l=t;return e&&(l="function"==typeof e?e(t):s(s({},t),e)),l},c=function(e){var t=u(e.components);return 
n.createElement(i.Provider,{value:t},e.children)},d={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},h=n.forwardRef((function(e,t){var l=e.components,a=e.mdxType,r=e.originalType,i=e.parentName,c=o(e,["components","mdxType","originalType","parentName"]),h=u(l),p=a,m=h["".concat(i,".").concat(p)]||h[p]||d[p]||r;return l?n.createElement(m,s(s({ref:t},c),{},{components:l})):n.createElement(m,s({ref:t},c))}));function p(e,t){var l=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var r=l.length,s=new Array(r);s[0]=h;var o={};for(var i in t)hasOwnProperty.call(t,i)&&(o[i]=t[i]);o.originalType=e,o.mdxType="string"==typeof e?e:a,s[1]=o;for(var u=2;u{l.d(t,{Z:()=>s});var n=l(67294),a=l(86010);const r="tabItem_Ymn6";function s(e){let{children:t,hidden:l,className:s}=e;return n.createElement("div",{role:"tabpanel",className:(0,a.Z)(r,s),hidden:l},t)}},65488:(e,t,l)=>{l.d(t,{Z:()=>p});var n=l(87462),a=l(67294),r=l(86010),s=l(72389),o=l(67392),i=l(7094),u=l(12466);const c="tabList__CuJ",d="tabItem_LNqP";function h(e){var t;const{lazy:l,block:s,defaultValue:h,values:p,groupId:m,className:f}=e,k=a.Children.map(e.children,(e=>{if((0,a.isValidElement)(e)&&"value"in e.props)return e;throw new Error(`Docusaurus error: Bad child <${"string"==typeof e.type?e.type:e.type.name}>: all children of the component should be , and every should have a unique "value" prop.`)})),y=p??k.map((e=>{let{props:{value:t,label:l,attributes:n}}=e;return{value:t,label:l,attributes:n}})),v=(0,o.l)(y,((e,t)=>e.value===t.value));if(v.length>0)throw new Error(`Docusaurus error: Duplicate values "${v.map((e=>e.value)).join(", ")}" found in . Every value needs to be unique.`);const b=null===h?h:h??(null==(t=k.find((e=>e.props.default)))?void 0:t.props.value)??k[0].props.value;if(null!==b&&!y.some((e=>e.value===b)))throw new Error(`Docusaurus error: The has a defaultValue "${b}" but none of its children has the corresponding value. Available values are: ${y.map((e=>e.value)).join(", ")}. 
If you intend to show no default tab, use defaultValue={null} instead.`);const{tabGroupChoices:T,setTabGroupChoices:g}=(0,i.U)(),[E,O]=(0,a.useState)(b),N=[],{blockElementScrollPositionUntilNextRender:_}=(0,u.o5)();if(null!=m){const e=T[m];null!=e&&e!==E&&y.some((t=>t.value===e))&&O(e)}const w=e=>{const t=e.currentTarget,l=N.indexOf(t),n=y[l].value;n!==E&&(_(t),O(n),null!=m&&g(m,String(n)))},H=e=>{var t;let l=null;switch(e.key){case"Enter":w(e);break;case"ArrowRight":{const t=N.indexOf(e.currentTarget)+1;l=N[t]??N[0];break}case"ArrowLeft":{const t=N.indexOf(e.currentTarget)-1;l=N[t]??N[N.length-1];break}}null==(t=l)||t.focus()};return a.createElement("div",{className:(0,r.Z)("tabs-container",c)},a.createElement("ul",{role:"tablist","aria-orientation":"horizontal",className:(0,r.Z)("tabs",{"tabs--block":s},f)},y.map((e=>{let{value:t,label:l,attributes:s}=e;return a.createElement("li",(0,n.Z)({role:"tab",tabIndex:E===t?0:-1,"aria-selected":E===t,key:t,ref:e=>N.push(e),onKeyDown:H,onClick:w},s,{className:(0,r.Z)("tabs__item",d,null==s?void 0:s.className,{"tabs__item--active":E===t})}),l??t)}))),l?(0,a.cloneElement)(k.filter((e=>e.props.value===E))[0],{className:"margin-top--md"}):a.createElement("div",{className:"margin-top--md"},k.map(((e,t)=>(0,a.cloneElement)(e,{key:t,hidden:e.props.value!==E})))))}function p(e){const t=(0,s.Z)();return a.createElement(h,(0,n.Z)({key:String(t)},e))}},82749:(e,t,l)=>{l.r(t),l.d(t,{assets:()=>c,contentTitle:()=>i,default:()=>p,frontMatter:()=>o,metadata:()=>u,toc:()=>d});var n=l(87462),a=(l(67294),l(3905)),r=l(65488),s=l(85162);const o={},i=void 0,u={unversionedId:"howtos/quick-start/cheat-sheets/hashes",id:"howtos/quick-start/cheat-sheets/hashes",title:"hashes",description:"{n.d(t,{Zo:()=>c,kt:()=>y});var l=n(67294);function r(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function a(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);t&&(l=l.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,l)}return n}function s(e){for(var t=1;t=0||(r[n]=e[n]);return r}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(l=0;l=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(r[n]=e[n])}return r}var i=l.createContext({}),u=function(e){var t=l.useContext(i),n=t;return e&&(n="function"==typeof e?e(t):s(s({},t),e)),n},c=function(e){var t=u(e.components);return l.createElement(i.Provider,{value:t},e.children)},m={inlineCode:"code",wrapper:function(e){var t=e.children;return l.createElement(l.Fragment,{},t)}},p=l.forwardRef((function(e,t){var n=e.components,r=e.mdxType,a=e.originalType,i=e.parentName,c=o(e,["components","mdxType","originalType","parentName"]),p=u(n),y=r,d=p["".concat(i,".").concat(y)]||p[y]||m[y]||a;return n?l.createElement(d,s(s({ref:t},c),{},{components:n})):l.createElement(d,s({ref:t},c))}));function y(e,t){var n=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var a=n.length,s=new Array(a);s[0]=p;var o={};for(var i in t)hasOwnProperty.call(t,i)&&(o[i]=t[i]);o.originalType=e,o.mdxType="string"==typeof e?e:r,s[1]=o;for(var u=2;u{n.d(t,{Z:()=>s});var l=n(67294),r=n(86010);const a="tabItem_Ymn6";function s(e){let{children:t,hidden:n,className:s}=e;return l.createElement("div",{role:"tabpanel",className:(0,r.Z)(a,s),hidden:n},t)}},65488:(e,t,n)=>{n.d(t,{Z:()=>y});var l=n(87462),r=n(67294),a=n(86010),s=n(72389),o=n(67392),i=n(7094),u=n(12466);const 
c="tabList__CuJ",m="tabItem_LNqP";function p(e){var t;const{lazy:n,block:s,defaultValue:p,values:y,groupId:d,className:k}=e,h=r.Children.map(e.children,(e=>{if((0,r.isValidElement)(e)&&"value"in e.props)return e;throw new Error(`Docusaurus error: Bad child <${"string"==typeof e.type?e.type:e.type.name}>: all children of the component should be , and every should have a unique "value" prop.`)})),f=y??h.map((e=>{let{props:{value:t,label:n,attributes:l}}=e;return{value:t,label:n,attributes:l}})),b=(0,o.l)(f,((e,t)=>e.value===t.value));if(b.length>0)throw new Error(`Docusaurus error: Duplicate values "${b.map((e=>e.value)).join(", ")}" found in . Every value needs to be unique.`);const v=null===p?p:p??(null==(t=h.find((e=>e.props.default)))?void 0:t.props.value)??h[0].props.value;if(null!==v&&!f.some((e=>e.value===v)))throw new Error(`Docusaurus error: The has a defaultValue "${v}" but none of its children has the corresponding value. Available values are: ${f.map((e=>e.value)).join(", ")}. If you intend to show no default tab, use defaultValue={null} instead.`);const{tabGroupChoices:g,setTabGroupChoices:T}=(0,i.U)(),[O,E]=(0,r.useState)(v),R=[],{blockElementScrollPositionUntilNextRender:N}=(0,u.o5)();if(null!=d){const e=g[d];null!=e&&e!==O&&f.some((t=>t.value===e))&&E(e)}const C=e=>{const t=e.currentTarget,n=R.indexOf(t),l=f[n].value;l!==O&&(N(t),E(l),null!=d&&T(d,String(l)))},x=e=>{var t;let n=null;switch(e.key){case"Enter":C(e);break;case"ArrowRight":{const t=R.indexOf(e.currentTarget)+1;n=R[t]??R[0];break}case"ArrowLeft":{const t=R.indexOf(e.currentTarget)-1;n=R[t]??R[R.length-1];break}}null==(t=n)||t.focus()};return r.createElement("div",{className:(0,a.Z)("tabs-container",c)},r.createElement("ul",{role:"tablist","aria-orientation":"horizontal",className:(0,a.Z)("tabs",{"tabs--block":s},k)},f.map((e=>{let{value:t,label:n,attributes:s}=e;return r.createElement("li",(0,l.Z)({role:"tab",tabIndex:O===t?0:-1,"aria-selected":O===t,key:t,ref:e=>R.push(e),onKeyDown:x,onClick:C},s,{className:(0,a.Z)("tabs__item",m,null==s?void 0:s.className,{"tabs__item--active":O===t})}),n??t)}))),n?(0,r.cloneElement)(h.filter((e=>e.props.value===O))[0],{className:"margin-top--md"}):r.createElement("div",{className:"margin-top--md"},h.map(((e,t)=>(0,r.cloneElement)(e,{key:t,hidden:e.props.value!==O})))))}function y(e){const t=(0,s.Z)();return r.createElement(p,(0,l.Z)({key:String(t)},e))}},53742:(e,t,n)=>{n.r(t),n.d(t,{assets:()=>c,contentTitle:()=>i,default:()=>y,frontMatter:()=>o,metadata:()=>u,toc:()=>m});var l=n(87462),r=(n(67294),n(3905)),a=n(65488),s=n(85162);const o={},i=void 0,u={unversionedId:"howtos/quick-start/cheat-sheets/generic",id:"howtos/quick-start/cheat-sheets/generic",title:"generic",description:" scan = jedis.scan("0", new ScanParams() {\n {\n match("employee_profile:*");\n }\n});\nscan = jedis.scan(scan.getCursor(), new ScanParams() {\n {\n match("employee_profile:*");\n }\n});\n\n/*\n * DEL key [key ...]\n * O(N)\n * Removes the specified keys.\n */\njedis.del("employee_profile:viraj", "employee_profile:terry""employee_profile:sheera");\n\n/*\n * TTL key\n * O(1)\n * Returns the remaining time to live of a key that has a timeout.\n */\njedis.ttl("employee_profile:nicol");\n')))))}y.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/c551111a.610c35d1.js b/assets/js/c551111a.610c35d1.js deleted file mode 100644 index f2f6022e2f..0000000000 --- a/assets/js/c551111a.610c35d1.js +++ /dev/null @@ -1 +0,0 @@ -"use 
strict";(self.webpackChunkredis_developer_hub=self.webpackChunkredis_developer_hub||[]).push([[3878],{3905:(e,t,n)=>{n.d(t,{Zo:()=>c,kt:()=>p});var l=n(67294);function r(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function s(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);t&&(l=l.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,l)}return n}function o(e){for(var t=1;t=0||(r[n]=e[n]);return r}(e,t);if(Object.getOwnPropertySymbols){var s=Object.getOwnPropertySymbols(e);for(l=0;l=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(r[n]=e[n])}return r}var u=l.createContext({}),i=function(e){var t=l.useContext(u),n=t;return e&&(n="function"==typeof e?e(t):o(o({},t),e)),n},c=function(e){var t=i(e.components);return l.createElement(u.Provider,{value:t},e.children)},m={inlineCode:"code",wrapper:function(e){var t=e.children;return l.createElement(l.Fragment,{},t)}},d=l.forwardRef((function(e,t){var n=e.components,r=e.mdxType,s=e.originalType,u=e.parentName,c=a(e,["components","mdxType","originalType","parentName"]),d=i(n),p=r,k=d["".concat(u,".").concat(p)]||d[p]||m[p]||s;return n?l.createElement(k,o(o({ref:t},c),{},{components:n})):l.createElement(k,o({ref:t},c))}));function p(e,t){var n=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var s=n.length,o=new Array(s);o[0]=d;var a={};for(var u in t)hasOwnProperty.call(t,u)&&(a[u]=t[u]);a.originalType=e,a.mdxType="string"==typeof e?e:r,o[1]=a;for(var i=2;i{n.d(t,{Z:()=>o});var l=n(67294),r=n(86010);const s="tabItem_Ymn6";function o(e){let{children:t,hidden:n,className:o}=e;return l.createElement("div",{role:"tabpanel",className:(0,r.Z)(s,o),hidden:n},t)}},65488:(e,t,n)=>{n.d(t,{Z:()=>p});var l=n(87462),r=n(67294),s=n(86010),o=n(72389),a=n(67392),u=n(7094),i=n(12466);const c="tabList__CuJ",m="tabItem_LNqP";function d(e){var t;const{lazy:n,block:o,defaultValue:d,values:p,groupId:k,className:y}=e,h=r.Children.map(e.children,(e=>{if((0,r.isValidElement)(e)&&"value"in e.props)return e;throw new Error(`Docusaurus error: Bad child <${"string"==typeof e.type?e.type:e.type.name}>: all children of the component should be , and every should have a unique "value" prop.`)})),f=p??h.map((e=>{let{props:{value:t,label:n,attributes:l}}=e;return{value:t,label:n,attributes:l}})),b=(0,a.l)(f,((e,t)=>e.value===t.value));if(b.length>0)throw new Error(`Docusaurus error: Duplicate values "${b.map((e=>e.value)).join(", ")}" found in . Every value needs to be unique.`);const v=null===d?d:d??(null==(t=h.find((e=>e.props.default)))?void 0:t.props.value)??h[0].props.value;if(null!==v&&!f.some((e=>e.value===v)))throw new Error(`Docusaurus error: The has a defaultValue "${v}" but none of its children has the corresponding value. Available values are: ${f.map((e=>e.value)).join(", ")}. 
If you intend to show no default tab, use defaultValue={null} instead.`);const{tabGroupChoices:g,setTabGroupChoices:T}=(0,u.U)(),[O,E]=(0,r.useState)(v),x=[],{blockElementScrollPositionUntilNextRender:C}=(0,i.o5)();if(null!=k){const e=g[k];null!=e&&e!==O&&f.some((t=>t.value===e))&&E(e)}const w=e=>{const t=e.currentTarget,n=x.indexOf(t),l=f[n].value;l!==O&&(C(t),E(l),null!=k&&T(k,String(l)))},R=e=>{var t;let n=null;switch(e.key){case"Enter":w(e);break;case"ArrowRight":{const t=x.indexOf(e.currentTarget)+1;n=x[t]??x[0];break}case"ArrowLeft":{const t=x.indexOf(e.currentTarget)-1;n=x[t]??x[x.length-1];break}}null==(t=n)||t.focus()};return r.createElement("div",{className:(0,s.Z)("tabs-container",c)},r.createElement("ul",{role:"tablist","aria-orientation":"horizontal",className:(0,s.Z)("tabs",{"tabs--block":o},y)},f.map((e=>{let{value:t,label:n,attributes:o}=e;return r.createElement("li",(0,l.Z)({role:"tab",tabIndex:O===t?0:-1,"aria-selected":O===t,key:t,ref:e=>x.push(e),onKeyDown:R,onClick:w},o,{className:(0,s.Z)("tabs__item",m,null==o?void 0:o.className,{"tabs__item--active":O===t})}),n??t)}))),n?(0,r.cloneElement)(h.filter((e=>e.props.value===O))[0],{className:"margin-top--md"}):r.createElement("div",{className:"margin-top--md"},h.map(((e,t)=>(0,r.cloneElement)(e,{key:t,hidden:e.props.value!==O})))))}function p(e){const t=(0,o.Z)();return r.createElement(d,(0,l.Z)({key:String(t)},e))}},53742:(e,t,n)=>{n.r(t),n.d(t,{assets:()=>c,contentTitle:()=>u,default:()=>p,frontMatter:()=>a,metadata:()=>i,toc:()=>m});var l=n(87462),r=(n(67294),n(3905)),s=n(65488),o=n(85162);const a={},u=void 0,i={unversionedId:"howtos/quick-start/cheat-sheets/generic",id:"howtos/quick-start/cheat-sheets/generic",title:"generic",description:"{n.d(t,{Zo:()=>d,kt:()=>p});var r=n(67294);function s(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function a(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function l(e){for(var t=1;t=0||(s[n]=e[n]);return s}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(s[n]=e[n])}return s}var u=r.createContext({}),i=function(e){var t=r.useContext(u),n=t;return e&&(n="function"==typeof e?e(t):l(l({},t),e)),n},d=function(e){var t=i(e.components);return r.createElement(u.Provider,{value:t},e.children)},c={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},m=r.forwardRef((function(e,t){var n=e.components,s=e.mdxType,a=e.originalType,u=e.parentName,d=o(e,["components","mdxType","originalType","parentName"]),m=i(n),p=s,h=m["".concat(u,".").concat(p)]||m[p]||c[p]||a;return n?r.createElement(h,l(l({ref:t},d),{},{components:n})):r.createElement(h,l({ref:t},d))}));function p(e,t){var n=arguments,s=t&&t.mdxType;if("string"==typeof e||s){var a=n.length,l=new Array(a);l[0]=m;var o={};for(var u in t)hasOwnProperty.call(t,u)&&(o[u]=t[u]);o.originalType=e,o.mdxType="string"==typeof e?e:s,l[1]=o;for(var i=2;i{n.d(t,{Z:()=>l});var r=n(67294),s=n(86010);const a="tabItem_Ymn6";function l(e){let{children:t,hidden:n,className:l}=e;return r.createElement("div",{role:"tabpanel",className:(0,s.Z)(a,l),hidden:n},t)}},65488:(e,t,n)=>{n.d(t,{Z:()=>p});var r=n(87462),s=n(67294),a=n(86010),l=n(72389),o=n(67392),u=n(7094),i=n(12466);const 
d="tabList__CuJ",c="tabItem_LNqP";function m(e){var t;const{lazy:n,block:l,defaultValue:m,values:p,groupId:h,className:y}=e,b=s.Children.map(e.children,(e=>{if((0,s.isValidElement)(e)&&"value"in e.props)return e;throw new Error(`Docusaurus error: Bad child <${"string"==typeof e.type?e.type:e.type.name}>: all children of the component should be , and every should have a unique "value" prop.`)})),f=p??b.map((e=>{let{props:{value:t,label:n,attributes:r}}=e;return{value:t,label:n,attributes:r}})),k=(0,o.l)(f,((e,t)=>e.value===t.value));if(k.length>0)throw new Error(`Docusaurus error: Duplicate values "${k.map((e=>e.value)).join(", ")}" found in . Every value needs to be unique.`);const v=null===m?m:m??(null==(t=b.find((e=>e.props.default)))?void 0:t.props.value)??b[0].props.value;if(null!==v&&!f.some((e=>e.value===v)))throw new Error(`Docusaurus error: The has a defaultValue "${v}" but none of its children has the corresponding value. Available values are: ${f.map((e=>e.value)).join(", ")}. If you intend to show no default tab, use defaultValue={null} instead.`);const{tabGroupChoices:g,setTabGroupChoices:w}=(0,u.U)(),[O,E]=(0,s.useState)(v),T=[],{blockElementScrollPositionUntilNextRender:N}=(0,i.o5)();if(null!=h){const e=g[h];null!=e&&e!==O&&f.some((t=>t.value===e))&&E(e)}const S=e=>{const t=e.currentTarget,n=T.indexOf(t),r=f[n].value;r!==O&&(N(t),E(r),null!=h&&w(h,String(r)))},Z=e=>{var t;let n=null;switch(e.key){case"Enter":S(e);break;case"ArrowRight":{const t=T.indexOf(e.currentTarget)+1;n=T[t]??T[0];break}case"ArrowLeft":{const t=T.indexOf(e.currentTarget)-1;n=T[t]??T[T.length-1];break}}null==(t=n)||t.focus()};return s.createElement("div",{className:(0,a.Z)("tabs-container",d)},s.createElement("ul",{role:"tablist","aria-orientation":"horizontal",className:(0,a.Z)("tabs",{"tabs--block":l},y)},f.map((e=>{let{value:t,label:n,attributes:l}=e;return s.createElement("li",(0,r.Z)({role:"tab",tabIndex:O===t?0:-1,"aria-selected":O===t,key:t,ref:e=>T.push(e),onKeyDown:Z,onClick:S},l,{className:(0,a.Z)("tabs__item",c,null==l?void 0:l.className,{"tabs__item--active":O===t})}),n??t)}))),n?(0,s.cloneElement)(b.filter((e=>e.props.value===O))[0],{className:"margin-top--md"}):s.createElement("div",{className:"margin-top--md"},b.map(((e,t)=>(0,s.cloneElement)(e,{key:t,hidden:e.props.value!==O})))))}function p(e){const t=(0,l.Z)();return s.createElement(m,(0,r.Z)({key:String(t)},e))}},53188:(e,t,n)=>{n.r(t),n.d(t,{assets:()=>d,contentTitle:()=>u,default:()=>p,frontMatter:()=>o,metadata:()=>i,toc:()=>c});var r=n(87462),s=(n(67294),n(3905)),a=n(65488),l=n(85162);const o={},u=void 0,i={unversionedId:"howtos/quick-start/cheat-sheets/sorted-sets",id:"howtos/quick-start/cheat-sheets/sorted-sets",title:"sorted-sets",description:"{n.d(t,{Zo:()=>c,kt:()=>m});var r=n(67294);function l(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function a(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function o(e){for(var t=1;t=0||(l[n]=e[n]);return l}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(l[n]=e[n])}return l}var u=r.createContext({}),i=function(e){var t=r.useContext(u),n=t;return e&&(n="function"==typeof e?e(t):o(o({},t),e)),n},c=function(e){var t=i(e.components);return 
r.createElement(u.Provider,{value:t},e.children)},d={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},p=r.forwardRef((function(e,t){var n=e.components,l=e.mdxType,a=e.originalType,u=e.parentName,c=s(e,["components","mdxType","originalType","parentName"]),p=i(n),m=l,f=p["".concat(u,".").concat(m)]||p[m]||d[m]||a;return n?r.createElement(f,o(o({ref:t},c),{},{components:n})):r.createElement(f,o({ref:t},c))}));function m(e,t){var n=arguments,l=t&&t.mdxType;if("string"==typeof e||l){var a=n.length,o=new Array(a);o[0]=p;var s={};for(var u in t)hasOwnProperty.call(t,u)&&(s[u]=t[u]);s.originalType=e,s.mdxType="string"==typeof e?e:l,o[1]=s;for(var i=2;i{n.d(t,{Z:()=>o});var r=n(67294),l=n(86010);const a="tabItem_Ymn6";function o(e){let{children:t,hidden:n,className:o}=e;return r.createElement("div",{role:"tabpanel",className:(0,l.Z)(a,o),hidden:n},t)}},65488:(e,t,n)=>{n.d(t,{Z:()=>m});var r=n(87462),l=n(67294),a=n(86010),o=n(72389),s=n(67392),u=n(7094),i=n(12466);const c="tabList__CuJ",d="tabItem_LNqP";function p(e){var t;const{lazy:n,block:o,defaultValue:p,values:m,groupId:f,className:b}=e,h=l.Children.map(e.children,(e=>{if((0,l.isValidElement)(e)&&"value"in e.props)return e;throw new Error(`Docusaurus error: Bad child <${"string"==typeof e.type?e.type:e.type.name}>: all children of the component should be , and every should have a unique "value" prop.`)})),k=m??h.map((e=>{let{props:{value:t,label:n,attributes:r}}=e;return{value:t,label:n,attributes:r}})),v=(0,s.l)(k,((e,t)=>e.value===t.value));if(v.length>0)throw new Error(`Docusaurus error: Duplicate values "${v.map((e=>e.value)).join(", ")}" found in . Every value needs to be unique.`);const y=null===p?p:p??(null==(t=h.find((e=>e.props.default)))?void 0:t.props.value)??h[0].props.value;if(null!==y&&!k.some((e=>e.value===y)))throw new Error(`Docusaurus error: The has a defaultValue "${y}" but none of its children has the corresponding value. Available values are: ${k.map((e=>e.value)).join(", ")}. 
If you intend to show no default tab, use defaultValue={null} instead.`);const{tabGroupChoices:g,setTabGroupChoices:w}=(0,u.U)(),[O,E]=(0,l.useState)(y),T=[],{blockElementScrollPositionUntilNextRender:N}=(0,i.o5)();if(null!=f){const e=g[f];null!=e&&e!==O&&k.some((t=>t.value===e))&&E(e)}const D=e=>{const t=e.currentTarget,n=T.indexOf(t),r=k[n].value;r!==O&&(N(t),E(r),null!=f&&w(f,String(r)))},Z=e=>{var t;let n=null;switch(e.key){case"Enter":D(e);break;case"ArrowRight":{const t=T.indexOf(e.currentTarget)+1;n=T[t]??T[0];break}case"ArrowLeft":{const t=T.indexOf(e.currentTarget)-1;n=T[t]??T[T.length-1];break}}null==(t=n)||t.focus()};return l.createElement("div",{className:(0,a.Z)("tabs-container",c)},l.createElement("ul",{role:"tablist","aria-orientation":"horizontal",className:(0,a.Z)("tabs",{"tabs--block":o},b)},k.map((e=>{let{value:t,label:n,attributes:o}=e;return l.createElement("li",(0,r.Z)({role:"tab",tabIndex:O===t?0:-1,"aria-selected":O===t,key:t,ref:e=>T.push(e),onKeyDown:Z,onClick:D},o,{className:(0,a.Z)("tabs__item",d,null==o?void 0:o.className,{"tabs__item--active":O===t})}),n??t)}))),n?(0,l.cloneElement)(h.filter((e=>e.props.value===O))[0],{className:"margin-top--md"}):l.createElement("div",{className:"margin-top--md"},h.map(((e,t)=>(0,l.cloneElement)(e,{key:t,hidden:e.props.value!==O})))))}function m(e){const t=(0,o.Z)();return l.createElement(p,(0,r.Z)({key:String(t)},e))}},53188:(e,t,n)=>{n.r(t),n.d(t,{assets:()=>c,contentTitle:()=>u,default:()=>m,frontMatter:()=>s,metadata:()=>i,toc:()=>d});var r=n(87462),l=(n(67294),n(3905)),a=n(65488),o=n(85162);const s={},u=void 0,i={unversionedId:"howtos/quick-start/cheat-sheets/sorted-sets",id:"howtos/quick-start/cheat-sheets/sorted-sets",title:"sorted-sets",description:"{n.d(t,{Zo:()=>d,kt:()=>k});var l=n(67294);function s(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function a(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);t&&(l=l.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,l)}return n}function r(e){for(var t=1;t=0||(s[n]=e[n]);return s}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(l=0;l=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(s[n]=e[n])}return s}var i=l.createContext({}),u=function(e){var t=l.useContext(i),n=t;return e&&(n="function"==typeof e?e(t):r(r({},t),e)),n},d=function(e){var t=u(e.components);return l.createElement(i.Provider,{value:t},e.children)},m={inlineCode:"code",wrapper:function(e){var t=e.children;return l.createElement(l.Fragment,{},t)}},c=l.forwardRef((function(e,t){var n=e.components,s=e.mdxType,a=e.originalType,i=e.parentName,d=o(e,["components","mdxType","originalType","parentName"]),c=u(n),k=s,p=c["".concat(i,".").concat(k)]||c[k]||m[k]||a;return n?l.createElement(p,r(r({ref:t},d),{},{components:n})):l.createElement(p,r({ref:t},d))}));function k(e,t){var n=arguments,s=t&&t.mdxType;if("string"==typeof e||s){var a=n.length,r=new Array(a);r[0]=c;var o={};for(var i in t)hasOwnProperty.call(t,i)&&(o[i]=t[i]);o.originalType=e,o.mdxType="string"==typeof e?e:s,r[1]=o;for(var u=2;u{n.d(t,{Z:()=>r});var l=n(67294),s=n(86010);const a="tabItem_Ymn6";function r(e){let{children:t,hidden:n,className:r}=e;return l.createElement("div",{role:"tabpanel",className:(0,s.Z)(a,r),hidden:n},t)}},65488:(e,t,n)=>{n.d(t,{Z:()=>k});var l=n(87462),s=n(67294),a=n(86010),r=n(72389),o=n(67392),i=n(7094),u=n(12466);const 
d="tabList__CuJ",m="tabItem_LNqP";function c(e){var t;const{lazy:n,block:r,defaultValue:c,values:k,groupId:p,className:h}=e,y=s.Children.map(e.children,(e=>{if((0,s.isValidElement)(e)&&"value"in e.props)return e;throw new Error(`Docusaurus error: Bad child <${"string"==typeof e.type?e.type:e.type.name}>: all children of the component should be , and every should have a unique "value" prop.`)})),f=k??y.map((e=>{let{props:{value:t,label:n,attributes:l}}=e;return{value:t,label:n,attributes:l}})),g=(0,o.l)(f,((e,t)=>e.value===t.value));if(g.length>0)throw new Error(`Docusaurus error: Duplicate values "${g.map((e=>e.value)).join(", ")}" found in . Every value needs to be unique.`);const N=null===c?c:c??(null==(t=y.find((e=>e.props.default)))?void 0:t.props.value)??y[0].props.value;if(null!==N&&!f.some((e=>e.value===N)))throw new Error(`Docusaurus error: The has a defaultValue "${N}" but none of its children has the corresponding value. Available values are: ${f.map((e=>e.value)).join(", ")}. If you intend to show no default tab, use defaultValue={null} instead.`);const{tabGroupChoices:T,setTabGroupChoices:S}=(0,i.U)(),[v,O]=(0,s.useState)(N),R=[],{blockElementScrollPositionUntilNextRender:E}=(0,u.o5)();if(null!=p){const e=T[p];null!=e&&e!==v&&f.some((t=>t.value===e))&&O(e)}const b=e=>{const t=e.currentTarget,n=R.indexOf(t),l=f[n].value;l!==v&&(E(t),O(l),null!=p&&S(p,String(l)))},x=e=>{var t;let n=null;switch(e.key){case"Enter":b(e);break;case"ArrowRight":{const t=R.indexOf(e.currentTarget)+1;n=R[t]??R[0];break}case"ArrowLeft":{const t=R.indexOf(e.currentTarget)-1;n=R[t]??R[R.length-1];break}}null==(t=n)||t.focus()};return s.createElement("div",{className:(0,a.Z)("tabs-container",d)},s.createElement("ul",{role:"tablist","aria-orientation":"horizontal",className:(0,a.Z)("tabs",{"tabs--block":r},h)},f.map((e=>{let{value:t,label:n,attributes:r}=e;return s.createElement("li",(0,l.Z)({role:"tab",tabIndex:v===t?0:-1,"aria-selected":v===t,key:t,ref:e=>R.push(e),onKeyDown:x,onClick:b},r,{className:(0,a.Z)("tabs__item",m,null==r?void 0:r.className,{"tabs__item--active":v===t})}),n??t)}))),n?(0,s.cloneElement)(y.filter((e=>e.props.value===v))[0],{className:"margin-top--md"}):s.createElement("div",{className:"margin-top--md"},y.map(((e,t)=>(0,s.cloneElement)(e,{key:t,hidden:e.props.value!==v})))))}function k(e){const t=(0,r.Z)();return s.createElement(c,(0,l.Z)({key:String(t)},e))}},50358:(e,t,n)=>{n.d(t,{Z:()=>i});var l=n(67294),s=n(52263);const a="authorByline_VoxI",r="authorLabel_a70t",o="authorProfileImage_URwT";const i=function(e){let{frontMatter:t}=e;const{siteConfig:n}=(0,s.Z)(),i=n.customFields.authors;return l.createElement(l.Fragment,null,t.authors&&l.createElement("div",{className:"docAuthors"},l.createElement("hr",null),t.authors.map((e=>l.createElement("div",{key:e,className:a},l.createElement("img",{className:o,src:`/img/${i[e].image?i[e].image:"default_author_profile_pic.png"}`,alt:`Profile picture for ${i[e].name}`}),l.createElement("div",null,l.createElement("div",{className:r},"Author:"),l.createElement("div",null,l.createElement("a",{href:i[e].link,target:"_blank"},i[e].name),", ",i[e].title))))),l.createElement("hr",null)))}},84078:(e,t,n)=>{n.r(t),n.d(t,{assets:()=>N,contentTitle:()=>f,default:()=>v,frontMatter:()=>y,metadata:()=>g,toc:()=>T});var l=n(87462),s=(n(67294),n(3905)),a=n(50358),r=n(64500),o=n(32936),i=n(53742),u=n(82749),d=n(93880),m=n(53188),c=n(36622),k=n(135),p=n(86299),h=n(74476);n(67509);const y={id:"index-quick-start-cheat-sheet",title:"Redis Commands Cheat 
sheet",sidebar_label:"Cheat sheet",slug:"/howtos/quick-start/cheat-sheet",authors:["prasan","will"]},f=void 0,g={unversionedId:"howtos/quick-start/cheat-sheets/index-quick-start-cheat-sheet",id:"howtos/quick-start/cheat-sheets/index-quick-start-cheat-sheet",title:"Redis Commands Cheat sheet",description:"Connect",source:"@site/docs/howtos/quick-start/cheat-sheets/cheat-sheet.mdx",sourceDirName:"howtos/quick-start/cheat-sheets",slug:"/howtos/quick-start/cheat-sheet",permalink:"/howtos/quick-start/cheat-sheet",draft:!1,editUrl:"https://github.com/redis-developer/redis-developer/edit/master/docs/howtos/quick-start/cheat-sheets/cheat-sheet.mdx",tags:[],version:"current",lastUpdatedAt:1694640549,formattedLastUpdatedAt:"Sep 13, 2023",frontMatter:{id:"index-quick-start-cheat-sheet",title:"Redis Commands Cheat sheet",sidebar_label:"Cheat sheet",slug:"/howtos/quick-start/cheat-sheet",authors:["prasan","will"]},sidebar:"docs",previous:{title:"Overview",permalink:"/howtos"},next:{title:"Overview",permalink:"/howtos/solutions"}},N={},T=[{value:"Connect",id:"connect",level:2},{value:"Strings/Numbers",id:"stringsnumbers",level:2},{value:"Generic",id:"generic",level:2},{value:"Hashes",id:"hashes",level:2},{value:"Sets",id:"sets",level:2},{value:"Sorted sets",id:"sorted-sets",level:2},{value:"Lists",id:"lists",level:2},{value:"Streams",id:"streams",level:2},{value:"\xa0",id:"",level:3},{value:"JSON",id:"json",level:2},{value:"Search and Query",id:"search-and-query",level:2}],S={toc:T};function v(e){let{components:t,...n}=e;return(0,s.kt)("wrapper",(0,l.Z)({},S,n,{components:t,mdxType:"MDXLayout"}),(0,s.kt)(a.Z,{frontMatter:y,mdxType:"Authors"}),(0,s.kt)("h2",{id:"connect"},"Connect"),(0,s.kt)(r.default,{mdxType:"CheatSheetConnect"}),(0,s.kt)("admonition",{type:"note"},(0,s.kt)("p",{parentName:"admonition"},"To setup Redis either locally or in the cloud, refer to the ",(0,s.kt)("a",{parentName:"p",href:"/howtos/quick-start#setup-redis"},(0,s.kt)("u",null,"tutorial")))),(0,s.kt)("h2",{id:"stringsnumbers"},"Strings/Numbers"),(0,s.kt)(o.default,{mdxType:"CheatSheetStrings"}),(0,s.kt)("h2",{id:"generic"},"Generic"),(0,s.kt)(i.default,{mdxType:"CheatSheetGeneric"}),(0,s.kt)("h2",{id:"hashes"},"Hashes"),(0,s.kt)(u.default,{mdxType:"CheatSheetHashes"}),(0,s.kt)("h2",{id:"sets"},"Sets"),(0,s.kt)(d.default,{mdxType:"CheatSheetSets"}),(0,s.kt)("h2",{id:"sorted-sets"},"Sorted sets"),(0,s.kt)(m.default,{mdxType:"CheatSheetSortedSets"}),(0,s.kt)("h2",{id:"lists"},"Lists"),(0,s.kt)(c.default,{mdxType:"CheatSheetLists"}),(0,s.kt)("h2",{id:"streams"},"Streams"),(0,s.kt)(k.default,{mdxType:"CheatSheetStreams"}),(0,s.kt)("h3",{id:""},"\xa0"),(0,s.kt)("hr",null),(0,s.kt)("admonition",{title:"Redis stack commands",type:"info"},(0,s.kt)("p",{parentName:"admonition"},(0,s.kt)("a",{parentName:"p",href:"https://redis.io/docs/about/about-stack/"},(0,s.kt)("u",null,"Redis stack"))," extends the core features\nof Redis OSS like querying across hashes and JSON documents, time series data support,\nfull-text search ..etc")),(0,s.kt)("h2",{id:"json"},"JSON"),(0,s.kt)(p.default,{mdxType:"CheatSheetJSON"}),(0,s.kt)("h2",{id:"search-and-query"},"Search and Query"),(0,s.kt)(h.default,{mdxType:"CheatSheetSearchAndQuery"}))}v.isMDXComponent=!0},64500:(e,t,n)=>{n.r(t),n.d(t,{assets:()=>d,contentTitle:()=>i,default:()=>k,frontMatter:()=>o,metadata:()=>u,toc:()=>m});var l=n(87462),s=(n(67294),n(3905)),a=n(65488),r=n(85162);const o={},i=void 
0,u={unversionedId:"howtos/quick-start/cheat-sheets/connect",id:"howtos/quick-start/cheat-sheets/connect",title:"connect",description:" redis-cli\n\n"))),(0,s.kt)(r.Z,{value:"REDIS_INSIGHT",mdxType:"TabItem"},(0,s.kt)("p",null,"Download ",(0,s.kt)("u",null,(0,s.kt)("a",{parentName:"p",href:"https://redis.com/redis-enterprise/redis-insight/"},"RedisInsight"))," to visually explore your Redis data or to engage with raw Redis commands in the workbench. Dive deeper into RedisInsight with these ",(0,s.kt)("u",null,(0,s.kt)("a",{parentName:"p",href:"/explore/redisinsight/"},"tutorials")),"."),(0,s.kt)("p",null,(0,s.kt)("img",{alt:"redis-insight-connect",src:n(40083).Z,width:"1896",height:"954"}))),(0,s.kt)(r.Z,{value:"NODE_JS",mdxType:"TabItem"},(0,s.kt)("pre",null,(0,s.kt)("code",{parentName:"pre",className:"language-js"},"import { createClient } from 'redis';\n\nlet client = createClient({ url: 'redis://localhost:6379' });\n\nawait client.connect();\n\n//await client.set('key', 'value');\n\nawait client.disconnect();\n"))),(0,s.kt)(r.Z,{value:"PYTHON",mdxType:"TabItem"},(0,s.kt)("pre",null,(0,s.kt)("code",{parentName:"pre",className:"language-python"},"import redis\n\nr = redis.Redis(host='localhost', port=6379, db=0)\n"))),(0,s.kt)(r.Z,{value:"C#",mdxType:"TabItem"},(0,s.kt)("pre",null,(0,s.kt)("code",{parentName:"pre",className:"language-csharp"},'using StackExchange.Redis;\n\nConnectionMultiplexer redis = ConnectionMultiplexer.Connect("localhost");\nIDatabase db = redis.GetDatabase();\n'))),(0,s.kt)(r.Z,{value:"JAVA",mdxType:"TabItem"},(0,s.kt)("pre",null,(0,s.kt)("code",{parentName:"pre",className:"language-java"},'import redis.clients.jedis.JedisPooled;\n\nJedisPooled jedis = new JedisPooled("localhost", 6379);\n')))))}k.isMDXComponent=!0},53742:(e,t,n)=>{n.r(t),n.d(t,{assets:()=>d,contentTitle:()=>i,default:()=>k,frontMatter:()=>o,metadata:()=>u,toc:()=>m});var l=n(87462),s=(n(67294),n(3905)),a=n(65488),r=n(85162);const o={},i=void 0,u={unversionedId:"howtos/quick-start/cheat-sheets/generic",id:"howtos/quick-start/cheat-sheets/generic",title:"generic",description:" scan = jedis.scan("0", new ScanParams() {\n {\n match("employee_profile:*");\n }\n});\nscan = jedis.scan(scan.getCursor(), new ScanParams() {\n {\n match("employee_profile:*");\n }\n});\n\n/*\n * DEL key [key ...]\n * O(N)\n * Removes the specified keys.\n */\njedis.del("employee_profile:viraj", "employee_profile:terry", "employee_profile:sheera");\n\n/*\n * TTL key\n * O(1)\n * Returns the remaining time to live of a key that has a timeout.\n */\njedis.ttl("employee_profile:nicol");\n')))))}k.isMDXComponent=!0},82749:(e,t,n)=>{n.r(t),n.d(t,{assets:()=>d,contentTitle:()=>i,default:()=>k,frontMatter:()=>o,metadata:()=>u,toc:()=>m});var l=n(87462),s=(n(67294),n(3905)),a=n(65488),r=n(85162);const o={},i=void 0,u={unversionedId:"howtos/quick-start/cheat-sheets/hashes",id:"howtos/quick-start/cheat-sheets/hashes",title:"hashes",description:"{n.r(t),n.d(t,{assets:()=>d,contentTitle:()=>i,default:()=>k,frontMatter:()=>o,metadata:()=>u,toc:()=>m});var l=n(87462),s=(n(67294),n(3905)),a=n(65488),r=n(85162);const o={},i=void 0,u={unversionedId:"howtos/quick-start/cheat-sheets/json",id:"howtos/quick-start/cheat-sheets/json",title:"json",description:"{n.r(t),n.d(t,{assets:()=>d,contentTitle:()=>i,default:()=>k,frontMatter:()=>o,metadata:()=>u,toc:()=>m});var l=n(87462),s=(n(67294),n(3905)),a=n(65488),r=n(85162);const o={},i=void 
0,u={unversionedId:"howtos/quick-start/cheat-sheets/lists",id:"howtos/quick-start/cheat-sheets/lists",title:"lists",description:"{n.r(t),n.d(t,{assets:()=>d,contentTitle:()=>i,default:()=>k,frontMatter:()=>o,metadata:()=>u,toc:()=>m});var l=n(87462),s=(n(67294),n(3905)),a=n(65488),r=n(85162);const o={},i=void 0,u={unversionedId:"howtos/quick-start/cheat-sheets/search-and-query",id:"howtos/quick-start/cheat-sheets/search-and-query",title:"search-and-query",description:" 18\nconst query4 = '(@skills:{NodeJS})';\nconst searchResult = await client.ft.search(\n STAFF_INDEX_KEY,\n query1, //query2, query3, query4\n {\n RETURN: ['name', 'age', 'isSingle'],\n LIMIT: {\n from: 0,\n size: 10,\n },\n },\n);\nconsole.log(JSON.stringify(searchResult));\n//{\"total\":1,\"documents\":[{\"id\":\"staff:2\",\"value\":{\"name\":\"Alex\",\"age\":\"45\",\"isSingle\":\"1\"}}]}\n\n/*\n FT.AGGREGATE index query\n Run a search query on an index, and perform aggregate transformations on the results\n\n FT.AGGREGATE staff:index \"(@age:[(10 +inf])\"\n GROUPBY 1 @age\n REDUCE COUNT 0 AS userCount\n SORTBY 1 @age\n LIMIT 0 10\n */\nconst aggregateResult = await client.ft.aggregate(\n STAFF_INDEX_KEY,\n '(@age:[(10 +inf])',\n {\n STEPS: [\n {\n type: AggregateSteps.GROUPBY,\n properties: ['@age'],\n REDUCE: [\n {\n type: AggregateGroupByReducers.COUNT,\n AS: 'userCount',\n },\n ],\n },\n {\n type: AggregateSteps.SORTBY,\n BY: '@age',\n },\n {\n type: AggregateSteps.LIMIT,\n from: 0,\n size: 10,\n },\n ],\n },\n);\nconsole.log(JSON.stringify(aggregateResult));\n//{\"total\":2,\"results\":[{\"age\":\"22\",\"userCount\":\"1\"},{\"age\":\"45\",\"userCount\":\"1\"}]}\n//----\n\n/*\n FT.INFO index\n Return information and statistics on the index\n O(1)\n */\nconst infoResult = await client.ft.info(STAFF_INDEX_KEY);\nconsole.log(infoResult);\n/**\n {\n indexName: 'staff:index',\n numDocs: '2',\n maxDocId: '4',\n stopWords: 2\n ...\n }\n */\n"))),(0,s.kt)(r.Z,{value:"PYTHON",mdxType:"TabItem"},(0,s.kt)("pre",null,(0,s.kt)("code",{parentName:"pre",className:"language-python"},"try:\n r.ft('idx-employees').dropindex()\nexcept:\n pass\n\n# FT.CREATE index [ON HASH | JSON] [PREFIX count prefix [prefix ...]] SCHEMA field_name [AS alias] TEXT | TAG | NUMERIC | GEO | VECTOR | GEOSHAPE [SORTABLE [UNF]] [NOINDEX] [ field_name [AS alias] TEXT | TAG | NUMERIC | GEO | VECTOR | GEOSHAPE [ SORTABLE [UNF]] [NOINDEX] ...]\n# O(K) where K is the number of fields in the document, O(N) for keys in the keyspace\n# Creates a new search index with the given specification.\nschema = (TextField('$.name', as_name='name', sortable=True), NumericField('$.age', as_name='age', sortable=True),\n TagField('$.single', as_name='single'), TagField('$.skills[*]', as_name='skills'))\n\nr.ft('idx-employees').create_index(schema, definition=IndexDefinition(\n prefix=['employee_profile:'], index_type=IndexType.JSON))\n\n# FT.INFO index\n# O(1)\n# Return information and statistics on the index.\nr.ft('idx-employees').info()\n\n# FT.SEARCH index query\n# O(N)\n# Search the index with a textual query, returning either documents or just ids\nr.ft('idx-employees').search('Nicol')\nr.ft('idx-employees').search(\"@single:{false}\")\nr.ft('idx-employees').search(\"@skills:{python}\")\nr.ft('idx-employees').search(Query(\"*\").add_filter(NumericFilter('age', 30, 40)))\nr.json().arrappend('employee_profile:karol', '$.skills', 'python', 'java', 'c#')\nr.ft('idx-employees').search(Query(\"@skills:{java}, @skills:{python}\"))\n\n# FT.AGGREGATE index query\n# O(1)\n# Run a search 
query on an index, and perform aggregate transformations on the results, extracting statistics etc from them\nr.ft('idx-employees').aggregate(aggregations.AggregateRequest(\"*\").group_by('@age',\n reducers.count().alias('count')).sort_by(\"@age\")).rows\n\nr.ft('idx-employees').aggregate(aggregations.AggregateRequest(\"@skills:{python}\").group_by('@skills',\n reducers.tolist('@name').alias('names'))).rows\n"))),(0,s.kt)(r.Z,{value:"C#",mdxType:"TabItem"},(0,s.kt)("pre",null,(0,s.kt)("code",{parentName:"pre",className:"language-csharp"},'try\n{\n /*\n * FT.DROPINDEX index [DD]\n * O(1)\n * Deletes an index and all the documents in it.\n */\n db.FT().DropIndex("idx-employees");\n}\ncatch\n{\n // Index not found\n}\n\n/*\n * FT.CREATE index [ON HASH | JSON] [PREFIX count prefix [prefix ...]] SCHEMA\n * field_name [AS alias] TEXT | TAG | NUMERIC | GEO | VECTOR | GEOSHAPE [SORTABLE\n * [UNF]] [NOINDEX] [ field_name [AS alias] TEXT | TAG | NUMERIC | GEO | VECTOR\n * | GEOSHAPE [ SORTABLE [UNF]] [NOINDEX] ...]\n * O(K) where K is the number of fields in the document, O(N) for keys in the\n * keyspace\n * Creates a new search index with the given specification.\n */\ndb.FT().Create("idx-employees", new FTCreateParams()\n .On(IndexDataType.JSON)\n .Prefix("employee_profile:"),\n new Schema()\n .AddTextField(new FieldName("$.name", "name"), sortable: true)\n .AddNumericField(new FieldName("$.age", "age"), sortable: true)\n .AddTagField(new FieldName("$.single", "single"))\n .AddTagField(new FieldName("$.skills[*]", "skills")));\n\n/*\n * FT.INFO index\n * O(1)\n * Returns information and statistics on the index.\n */\ndb.FT().Info("idx-employees");\n\n/*\n * FT._LIST\n * O(1)\n * Returns a list of all existing indexes.\n */\ndb.FT()._List();\n\n/*\n * FT.SEARCH index query\n * O(N)\n * Search the index with a textual query, returning either documents or just ids\n */\ndb.FT().Search("idx-employees", new Query("@name:{nicol}"));\ndb.FT().Search("idx-employees", new Query("@single:{false}"));\ndb.FT().Search("idx-employees", new Query("@skills:{python}"));\ndb.FT().Search("idx-employees", new Query().AddFilter(new NumericFilter("@age", 30, 40)));\ndb.JSON().ArrAppend("employee_profile:karol", "$.skills", "python", "java", "c#");\ndb.FT().Search("idx-employees", new Query("@skills:{java}, @skills:{python}"));\n\n/*\n * FT.AGGREGATE index query\n * O(1)\n * Run a search query on an index, and perform aggregate transformations on the\n * results, extracting statistics etc from them\n */\ndb.FT().Aggregate("idx-employees", new AggregationRequest("@age:[20 40]")\n .GroupBy("@age", Reducers.Count().As("count"))\n .SortBy(new SortedField("@age", SortedField.SortOrder.ASC)));\ndb.FT().Aggregate("idx-employees", new AggregationRequest("@skills:{python}")\n .GroupBy("@skills", Reducers.ToList("@name").As("names")));\n'))),(0,s.kt)(r.Z,{value:"JAVA",mdxType:"TabItem"},(0,s.kt)("pre",null,(0,s.kt)("code",{parentName:"pre",className:"language-java"},'try {\n jedis.ftDropIndex("idx-employees");\n} catch (Exception e) {\n // Index not found\n}\n\n/*\n * FT.CREATE index [ON HASH | JSON] [PREFIX count prefix [prefix ...]] SCHEMA\n * field_name [AS alias] TEXT | TAG | NUMERIC | GEO | VECTOR | GEOSHAPE [SORTABLE\n * [UNF]] [NOINDEX] [ field_name [AS alias] TEXT | TAG | NUMERIC | GEO | VECTOR\n * | GEOSHAPE [ SORTABLE [UNF]] [NOINDEX] ...]\n * O(K) where K is the number of fields in the document, O(N) for keys in the\n * keyspace\n * Creates a new search index with the given specification.\n */\nSchema schema = new 
Schema()\n .addSortableTextField("$.name", 1.0).as("name")\n .addSortableNumericField("$.age").as("age")\n .addTagField("$.single").as("single")\n .addTagField("$.skills[*]").as("skills");\n\nIndexDefinition def = new IndexDefinition(IndexDefinition.Type.JSON)\n .setPrefixes("employee_profile:");\n\njedis.ftCreate("idx-employees", IndexOptions.defaultOptions().setDefinition(def), schema);\n\n/*\n * FT.INFO index\n * O(1)\n * Returns information and statistics on the index.\n */\njedis.ftInfo("idx-employees");\n\n/*\n * FT._LIST\n * O(1)\n * Returns a list of all existing indexes.\n */\njedis.ftList();\n\n/*\n * FT.SEARCH index query\n * O(N)\n * Search the index with a textual query, returning either documents or just ids\n */\njedis.ftSearch("idx-employees", "Nicol");\njedis.ftSearch("idx-employees", "@single:{false}");\njedis.ftSearch("idx-employees", "@skills:{python}");\njedis.ftSearch("idx-employees", "*",\n FTSearchParams.searchParams().filter(new NumericFilter("age", 30, 40)));\njedis.jsonArrAppend("employee_profile:karol", Path2.of("$.skills"), "\\"python\\"", "\\"java\\"", "\\"c#\\"");\njedis.ftSearch("idx-employees", "@skills:{java}, @skills:{python}");\n\n/*\n * FT.AGGREGATE index query\n * O(1)\n * Run a search query on an index, and perform aggregate transformations on the\n * results, extracting statistics etc from them\n */\njedis.ftAggregate("idx-employees", new AggregationBuilder()\n .groupBy("@age", Reducers.count().as("count")).sortBy(new SortedField("@age", SortOrder.ASC)))\n .getRows();\njedis.ftAggregate("idx-employees", new AggregationBuilder("@skills:{python}")\n .groupBy("@skills", Reducers.to_list("@name").as("names")))\n .getRows();\n')))))}k.isMDXComponent=!0},93880:(e,t,n)=>{n.r(t),n.d(t,{assets:()=>d,contentTitle:()=>i,default:()=>k,frontMatter:()=>o,metadata:()=>u,toc:()=>m});var l=n(87462),s=(n(67294),n(3905)),a=n(65488),r=n(85162);const o={},i=void 0,u={unversionedId:"howtos/quick-start/cheat-sheets/sets",id:"howtos/quick-start/cheat-sheets/sets",title:"sets",description:"{n.r(t),n.d(t,{assets:()=>d,contentTitle:()=>i,default:()=>k,frontMatter:()=>o,metadata:()=>u,toc:()=>m});var l=n(87462),s=(n(67294),n(3905)),a=n(65488),r=n(85162);const o={},i=void 0,u={unversionedId:"howtos/quick-start/cheat-sheets/sorted-sets",id:"howtos/quick-start/cheat-sheets/sorted-sets",title:"sorted-sets",description:"{n.r(t),n.d(t,{assets:()=>d,contentTitle:()=>i,default:()=>k,frontMatter:()=>o,metadata:()=>u,toc:()=>m});var l=n(87462),s=(n(67294),n(3905)),a=n(65488),r=n(85162);const o={},i=void 0,u={unversionedId:"howtos/quick-start/cheat-sheets/streams",id:"howtos/quick-start/cheat-sheets/streams",title:"streams",description:"{n.r(t),n.d(t,{assets:()=>d,contentTitle:()=>i,default:()=>k,frontMatter:()=>o,metadata:()=>u,toc:()=>m});var l=n(87462),s=(n(67294),n(3905)),a=n(65488),r=n(85162);const o={},i=void 0,u={unversionedId:"howtos/quick-start/cheat-sheets/strings",id:"howtos/quick-start/cheat-sheets/strings",title:"strings",description:"{n.r(t),n.d(t,{assets:()=>i,contentTitle:()=>r,default:()=>m,frontMatter:()=>a,metadata:()=>o,toc:()=>u});var l=n(87462),s=(n(67294),n(3905));const a={},r=void 
0,o={unversionedId:"howtos/quick-start/cheat-sheets/triggers-and-functions",id:"howtos/quick-start/cheat-sheets/triggers-and-functions",title:"triggers-and-functions",description:"",source:"@site/docs/howtos/quick-start/cheat-sheets/triggers-and-functions.mdx",sourceDirName:"howtos/quick-start/cheat-sheets",slug:"/howtos/quick-start/cheat-sheets/triggers-and-functions",permalink:"/howtos/quick-start/cheat-sheets/triggers-and-functions",draft:!1,editUrl:"https://github.com/redis-developer/redis-developer/edit/master/docs/howtos/quick-start/cheat-sheets/triggers-and-functions.mdx",tags:[],version:"current",lastUpdatedAt:1694640549,formattedLastUpdatedAt:"Sep 13, 2023",frontMatter:{}},i={},u=[],d={toc:u};function m(e){let{components:t,...n}=e;return(0,s.kt)("wrapper",(0,l.Z)({},d,n,{components:t,mdxType:"MDXLayout"}))}m.isMDXComponent=!0},40083:(e,t,n)=>{n.d(t,{Z:()=>l});const l=n.p+"assets/images/redis-insight-connect-ca60cd0282c3a317e5608da154f94867.png"}}]); \ No newline at end of file diff --git a/assets/js/f23cc535.6e1ab86e.js b/assets/js/f23cc535.6e1ab86e.js deleted file mode 100644 index 637ec2214e..0000000000 --- a/assets/js/f23cc535.6e1ab86e.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunkredis_developer_hub=self.webpackChunkredis_developer_hub||[]).push([[3820,370,5815,1380,5979,5426,7826,1309,9045,7269,3878,7729],{3905:(e,t,n)=>{n.d(t,{Zo:()=>d,kt:()=>m});var l=n(67294);function s(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function a(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);t&&(l=l.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,l)}return n}function r(e){for(var t=1;t=0||(s[n]=e[n]);return s}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(l=0;l=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(s[n]=e[n])}return s}var i=l.createContext({}),u=function(e){var t=l.useContext(i),n=t;return e&&(n="function"==typeof e?e(t):r(r({},t),e)),n},d=function(e){var t=u(e.components);return l.createElement(i.Provider,{value:t},e.children)},c={inlineCode:"code",wrapper:function(e){var t=e.children;return l.createElement(l.Fragment,{},t)}},k=l.forwardRef((function(e,t){var n=e.components,s=e.mdxType,a=e.originalType,i=e.parentName,d=o(e,["components","mdxType","originalType","parentName"]),k=u(n),m=s,h=k["".concat(i,".").concat(m)]||k[m]||c[m]||a;return n?l.createElement(h,r(r({ref:t},d),{},{components:n})):l.createElement(h,r({ref:t},d))}));function m(e,t){var n=arguments,s=t&&t.mdxType;if("string"==typeof e||s){var a=n.length,r=new Array(a);r[0]=k;var o={};for(var i in t)hasOwnProperty.call(t,i)&&(o[i]=t[i]);o.originalType=e,o.mdxType="string"==typeof e?e:s,r[1]=o;for(var u=2;u{n.d(t,{Z:()=>r});var l=n(67294),s=n(86010);const a="tabItem_Ymn6";function r(e){let{children:t,hidden:n,className:r}=e;return l.createElement("div",{role:"tabpanel",className:(0,s.Z)(a,r),hidden:n},t)}},65488:(e,t,n)=>{n.d(t,{Z:()=>m});var l=n(87462),s=n(67294),a=n(86010),r=n(72389),o=n(67392),i=n(7094),u=n(12466);const d="tabList__CuJ",c="tabItem_LNqP";function k(e){var t;const{lazy:n,block:r,defaultValue:k,values:m,groupId:h,className:p}=e,y=s.Children.map(e.children,(e=>{if((0,s.isValidElement)(e)&&"value"in e.props)return e;throw new Error(`Docusaurus error: Bad child <${"string"==typeof e.type?e.type:e.type.name}>: all children of the component should be , and every should have a unique "value" 
prop.`)})),f=m??y.map((e=>{let{props:{value:t,label:n,attributes:l}}=e;return{value:t,label:n,attributes:l}})),g=(0,o.l)(f,((e,t)=>e.value===t.value));if(g.length>0)throw new Error(`Docusaurus error: Duplicate values "${g.map((e=>e.value)).join(", ")}" found in . Every value needs to be unique.`);const S=null===k?k:k??(null==(t=y.find((e=>e.props.default)))?void 0:t.props.value)??y[0].props.value;if(null!==S&&!f.some((e=>e.value===S)))throw new Error(`Docusaurus error: The has a defaultValue "${S}" but none of its children has the corresponding value. Available values are: ${f.map((e=>e.value)).join(", ")}. If you intend to show no default tab, use defaultValue={null} instead.`);const{tabGroupChoices:T,setTabGroupChoices:v}=(0,i.U)(),[R,N]=(0,s.useState)(S),E=[],{blockElementScrollPositionUntilNextRender:O}=(0,u.o5)();if(null!=h){const e=T[h];null!=e&&e!==R&&f.some((t=>t.value===e))&&N(e)}const w=e=>{const t=e.currentTarget,n=E.indexOf(t),l=f[n].value;l!==R&&(O(t),N(l),null!=h&&v(h,String(l)))},x=e=>{var t;let n=null;switch(e.key){case"Enter":w(e);break;case"ArrowRight":{const t=E.indexOf(e.currentTarget)+1;n=E[t]??E[0];break}case"ArrowLeft":{const t=E.indexOf(e.currentTarget)-1;n=E[t]??E[E.length-1];break}}null==(t=n)||t.focus()};return s.createElement("div",{className:(0,a.Z)("tabs-container",d)},s.createElement("ul",{role:"tablist","aria-orientation":"horizontal",className:(0,a.Z)("tabs",{"tabs--block":r},p)},f.map((e=>{let{value:t,label:n,attributes:r}=e;return s.createElement("li",(0,l.Z)({role:"tab",tabIndex:R===t?0:-1,"aria-selected":R===t,key:t,ref:e=>E.push(e),onKeyDown:x,onClick:w},r,{className:(0,a.Z)("tabs__item",c,null==r?void 0:r.className,{"tabs__item--active":R===t})}),n??t)}))),n?(0,s.cloneElement)(y.filter((e=>e.props.value===R))[0],{className:"margin-top--md"}):s.createElement("div",{className:"margin-top--md"},y.map(((e,t)=>(0,s.cloneElement)(e,{key:t,hidden:e.props.value!==R})))))}function m(e){const t=(0,r.Z)();return s.createElement(k,(0,l.Z)({key:String(t)},e))}},50358:(e,t,n)=>{n.d(t,{Z:()=>i});var l=n(67294),s=n(52263);const a="authorByline_VoxI",r="authorLabel_a70t",o="authorProfileImage_URwT";const i=function(e){let{frontMatter:t}=e;const{siteConfig:n}=(0,s.Z)(),i=n.customFields.authors;return l.createElement(l.Fragment,null,t.authors&&l.createElement("div",{className:"docAuthors"},l.createElement("hr",null),t.authors.map((e=>l.createElement("div",{key:e,className:a},l.createElement("img",{className:o,src:`/img/${i[e].image?i[e].image:"default_author_profile_pic.png"}`,alt:`Profile picture for ${i[e].name}`}),l.createElement("div",null,l.createElement("div",{className:r},"Author:"),l.createElement("div",null,l.createElement("a",{href:i[e].link,target:"_blank"},i[e].name),", ",i[e].title))))),l.createElement("hr",null)))}},84078:(e,t,n)=>{n.r(t),n.d(t,{assets:()=>S,contentTitle:()=>f,default:()=>R,frontMatter:()=>y,metadata:()=>g,toc:()=>T});var l=n(87462),s=(n(67294),n(3905)),a=n(50358),r=n(64500),o=n(32936),i=n(53742),u=n(82749),d=n(93880),c=n(53188),k=n(36622),m=n(135),h=n(86299),p=n(74476);n(67509);const y={id:"index-quick-start-cheat-sheet",title:"Redis Commands Cheat sheet",sidebar_label:"Cheat sheet",slug:"/howtos/quick-start/cheat-sheet",authors:["prasan","will"]},f=void 0,g={unversionedId:"howtos/quick-start/cheat-sheets/index-quick-start-cheat-sheet",id:"howtos/quick-start/cheat-sheets/index-quick-start-cheat-sheet",title:"Redis Commands Cheat 
sheet",description:"Connect",source:"@site/docs/howtos/quick-start/cheat-sheets/cheat-sheet.mdx",sourceDirName:"howtos/quick-start/cheat-sheets",slug:"/howtos/quick-start/cheat-sheet",permalink:"/howtos/quick-start/cheat-sheet",draft:!1,editUrl:"https://github.com/redis-developer/redis-developer/edit/master/docs/howtos/quick-start/cheat-sheets/cheat-sheet.mdx",tags:[],version:"current",lastUpdatedAt:1694640549,formattedLastUpdatedAt:"Sep 13, 2023",frontMatter:{id:"index-quick-start-cheat-sheet",title:"Redis Commands Cheat sheet",sidebar_label:"Cheat sheet",slug:"/howtos/quick-start/cheat-sheet",authors:["prasan","will"]},sidebar:"docs",previous:{title:"Overview",permalink:"/howtos"},next:{title:"Overview",permalink:"/howtos/solutions"}},S={},T=[{value:"Connect",id:"connect",level:2},{value:"Strings/Numbers",id:"stringsnumbers",level:2},{value:"Generic",id:"generic",level:2},{value:"Hashes",id:"hashes",level:2},{value:"Sets",id:"sets",level:2},{value:"Sorted sets",id:"sorted-sets",level:2},{value:"Lists",id:"lists",level:2},{value:"Streams",id:"streams",level:2},{value:"\xa0",id:"",level:3},{value:"JSON",id:"json",level:2},{value:"Search and Query",id:"search-and-query",level:2}],v={toc:T};function R(e){let{components:t,...n}=e;return(0,s.kt)("wrapper",(0,l.Z)({},v,n,{components:t,mdxType:"MDXLayout"}),(0,s.kt)(a.Z,{frontMatter:y,mdxType:"Authors"}),(0,s.kt)("h2",{id:"connect"},"Connect"),(0,s.kt)(r.default,{mdxType:"CheatSheetConnect"}),(0,s.kt)("admonition",{type:"note"},(0,s.kt)("p",{parentName:"admonition"},"To setup Redis either locally or in the cloud, refer to the ",(0,s.kt)("a",{parentName:"p",href:"/howtos/quick-start#setup-redis"},(0,s.kt)("u",null,"tutorial")))),(0,s.kt)("h2",{id:"stringsnumbers"},"Strings/Numbers"),(0,s.kt)(o.default,{mdxType:"CheatSheetStrings"}),(0,s.kt)("h2",{id:"generic"},"Generic"),(0,s.kt)(i.default,{mdxType:"CheatSheetGeneric"}),(0,s.kt)("h2",{id:"hashes"},"Hashes"),(0,s.kt)(u.default,{mdxType:"CheatSheetHashes"}),(0,s.kt)("h2",{id:"sets"},"Sets"),(0,s.kt)(d.default,{mdxType:"CheatSheetSets"}),(0,s.kt)("h2",{id:"sorted-sets"},"Sorted sets"),(0,s.kt)(c.default,{mdxType:"CheatSheetSortedSets"}),(0,s.kt)("h2",{id:"lists"},"Lists"),(0,s.kt)(k.default,{mdxType:"CheatSheetLists"}),(0,s.kt)("h2",{id:"streams"},"Streams"),(0,s.kt)(m.default,{mdxType:"CheatSheetStreams"}),(0,s.kt)("h3",{id:""},"\xa0"),(0,s.kt)("hr",null),(0,s.kt)("admonition",{title:"Redis stack commands",type:"info"},(0,s.kt)("p",{parentName:"admonition"},(0,s.kt)("a",{parentName:"p",href:"https://redis.io/docs/about/about-stack/"},(0,s.kt)("u",null,"Redis stack"))," extends the core features\nof Redis OSS like querying across hashes and JSON documents, time series data support,\nfull-text search ..etc")),(0,s.kt)("h2",{id:"json"},"JSON"),(0,s.kt)(h.default,{mdxType:"CheatSheetJSON"}),(0,s.kt)("h2",{id:"search-and-query"},"Search and Query"),(0,s.kt)(p.default,{mdxType:"CheatSheetSearchAndQuery"}))}R.isMDXComponent=!0},64500:(e,t,n)=>{n.r(t),n.d(t,{assets:()=>d,contentTitle:()=>i,default:()=>m,frontMatter:()=>o,metadata:()=>u,toc:()=>c});var l=n(87462),s=(n(67294),n(3905)),a=n(65488),r=n(85162);const o={},i=void 0,u={unversionedId:"howtos/quick-start/cheat-sheets/connect",id:"howtos/quick-start/cheat-sheets/connect",title:"connect",description:" redis-cli\n\n"))),(0,s.kt)(r.Z,{value:"REDIS_INSIGHT",mdxType:"TabItem"},(0,s.kt)("p",null,"Download ",(0,s.kt)("u",null,(0,s.kt)("a",{parentName:"p",href:"https://redis.com/redis-enterprise/redis-insight/"},"RedisInsight"))," to visually explore your 
Redis data or to engage with raw Redis commands in the workbench. Dive deeper into RedisInsight with these ",(0,s.kt)("u",null,(0,s.kt)("a",{parentName:"p",href:"/explore/redisinsight/"},"tutorials")),"."),(0,s.kt)("p",null,(0,s.kt)("img",{alt:"redis-insight-connect",src:n(40083).Z,width:"1896",height:"954"}))),(0,s.kt)(r.Z,{value:"NODE_JS",mdxType:"TabItem"},(0,s.kt)("pre",null,(0,s.kt)("code",{parentName:"pre",className:"language-js"},"import { createClient } from 'redis';\n\nlet client = createClient({ url: 'redis://localhost:6379' });\n\nawait client.connect();\n\n//await client.set('key', 'value');\n\nawait client.disconnect();\n")))))}m.isMDXComponent=!0},53742:(e,t,n)=>{n.r(t),n.d(t,{assets:()=>d,contentTitle:()=>i,default:()=>m,frontMatter:()=>o,metadata:()=>u,toc:()=>c});var l=n(87462),s=(n(67294),n(3905)),a=n(65488),r=n(85162);const o={},i=void 0,u={unversionedId:"howtos/quick-start/cheat-sheets/generic",id:"howtos/quick-start/cheat-sheets/generic",title:"generic",description:"{n.r(t),n.d(t,{assets:()=>d,contentTitle:()=>i,default:()=>m,frontMatter:()=>o,metadata:()=>u,toc:()=>c});var l=n(87462),s=(n(67294),n(3905)),a=n(65488),r=n(85162);const o={},i=void 0,u={unversionedId:"howtos/quick-start/cheat-sheets/hashes",id:"howtos/quick-start/cheat-sheets/hashes",title:"hashes",description:"{n.r(t),n.d(t,{assets:()=>d,contentTitle:()=>i,default:()=>m,frontMatter:()=>o,metadata:()=>u,toc:()=>c});var l=n(87462),s=(n(67294),n(3905)),a=n(65488),r=n(85162);const o={},i=void 0,u={unversionedId:"howtos/quick-start/cheat-sheets/json",id:"howtos/quick-start/cheat-sheets/json",title:"json",description:"{n.r(t),n.d(t,{assets:()=>d,contentTitle:()=>i,default:()=>m,frontMatter:()=>o,metadata:()=>u,toc:()=>c});var l=n(87462),s=(n(67294),n(3905)),a=n(65488),r=n(85162);const o={},i=void 0,u={unversionedId:"howtos/quick-start/cheat-sheets/lists",id:"howtos/quick-start/cheat-sheets/lists",title:"lists",description:"{n.r(t),n.d(t,{assets:()=>d,contentTitle:()=>i,default:()=>m,frontMatter:()=>o,metadata:()=>u,toc:()=>c});var l=n(87462),s=(n(67294),n(3905)),a=n(65488),r=n(85162);const o={},i=void 0,u={unversionedId:"howtos/quick-start/cheat-sheets/search-and-query",id:"howtos/quick-start/cheat-sheets/search-and-query",title:"search-and-query",description:" 18\nconst query4 = '(@skills:{NodeJS})';\nconst searchResult = await client.ft.search(\n STAFF_INDEX_KEY,\n query1, //query2, query3, query4\n {\n RETURN: ['name', 'age', 'isSingle'],\n LIMIT: {\n from: 0,\n size: 10,\n },\n },\n);\nconsole.log(JSON.stringify(searchResult));\n//{\"total\":1,\"documents\":[{\"id\":\"staff:2\",\"value\":{\"name\":\"Alex\",\"age\":\"45\",\"isSingle\":\"1\"}}]}\n\n/*\n FT.AGGREGATE index query\n Run a search query on an index, and perform aggregate transformations on the results\n\n FT.AGGREGATE staff:index \"(@age:[(10 +inf])\" \n GROUPBY 1 @age \n REDUCE COUNT 0 AS userCount\n SORTBY 1 @age\n LIMIT 0 10\n */\nconst aggregateResult = await client.ft.aggregate(\n STAFF_INDEX_KEY,\n '(@age:[(10 +inf])',\n {\n STEPS: [\n {\n type: AggregateSteps.GROUPBY,\n properties: ['@age'],\n REDUCE: [\n {\n type: AggregateGroupByReducers.COUNT,\n AS: 'userCount',\n },\n ],\n },\n {\n type: AggregateSteps.SORTBY,\n BY: '@age',\n },\n {\n type: AggregateSteps.LIMIT,\n from: 0,\n size: 10,\n },\n ],\n },\n);\nconsole.log(JSON.stringify(aggregateResult));\n//{\"total\":2,\"results\":[{\"age\":\"22\",\"userCount\":\"1\"},{\"age\":\"45\",\"userCount\":\"1\"}]}\n//----\n\n/*\n FT.INFO index\n Return information and statistics on the index\n 
O(1)\n */\nconst infoResult = await client.ft.info(STAFF_INDEX_KEY);\nconsole.log(infoResult);\n/**\n {\n indexName: 'staff:index',\n numDocs: '2',\n maxDocId: '4',\n stopWords: 2\n ...\n }\n */\n")))))}m.isMDXComponent=!0},93880:(e,t,n)=>{n.r(t),n.d(t,{assets:()=>d,contentTitle:()=>i,default:()=>m,frontMatter:()=>o,metadata:()=>u,toc:()=>c});var l=n(87462),s=(n(67294),n(3905)),a=n(65488),r=n(85162);const o={},i=void 0,u={unversionedId:"howtos/quick-start/cheat-sheets/sets",id:"howtos/quick-start/cheat-sheets/sets",title:"sets",description:"{n.r(t),n.d(t,{assets:()=>d,contentTitle:()=>i,default:()=>m,frontMatter:()=>o,metadata:()=>u,toc:()=>c});var l=n(87462),s=(n(67294),n(3905)),a=n(65488),r=n(85162);const o={},i=void 0,u={unversionedId:"howtos/quick-start/cheat-sheets/sorted-sets",id:"howtos/quick-start/cheat-sheets/sorted-sets",title:"sorted-sets",description:"{n.r(t),n.d(t,{assets:()=>d,contentTitle:()=>i,default:()=>m,frontMatter:()=>o,metadata:()=>u,toc:()=>c});var l=n(87462),s=(n(67294),n(3905)),a=n(65488),r=n(85162);const o={},i=void 0,u={unversionedId:"howtos/quick-start/cheat-sheets/streams",id:"howtos/quick-start/cheat-sheets/streams",title:"streams",description:"{n.r(t),n.d(t,{assets:()=>d,contentTitle:()=>i,default:()=>m,frontMatter:()=>o,metadata:()=>u,toc:()=>c});var l=n(87462),s=(n(67294),n(3905)),a=n(65488),r=n(85162);const o={},i=void 0,u={unversionedId:"howtos/quick-start/cheat-sheets/strings",id:"howtos/quick-start/cheat-sheets/strings",title:"strings",description:"{n.r(t),n.d(t,{assets:()=>i,contentTitle:()=>r,default:()=>c,frontMatter:()=>a,metadata:()=>o,toc:()=>u});var l=n(87462),s=(n(67294),n(3905));const a={},r=void 0,o={unversionedId:"howtos/quick-start/cheat-sheets/triggers-and-functions",id:"howtos/quick-start/cheat-sheets/triggers-and-functions",title:"triggers-and-functions",description:"",source:"@site/docs/howtos/quick-start/cheat-sheets/triggers-and-functions.mdx",sourceDirName:"howtos/quick-start/cheat-sheets",slug:"/howtos/quick-start/cheat-sheets/triggers-and-functions",permalink:"/howtos/quick-start/cheat-sheets/triggers-and-functions",draft:!1,editUrl:"https://github.com/redis-developer/redis-developer/edit/master/docs/howtos/quick-start/cheat-sheets/triggers-and-functions.mdx",tags:[],version:"current",lastUpdatedAt:1694640549,formattedLastUpdatedAt:"Sep 13, 2023",frontMatter:{}},i={},u=[],d={toc:u};function c(e){let{components:t,...n}=e;return(0,s.kt)("wrapper",(0,l.Z)({},d,n,{components:t,mdxType:"MDXLayout"}))}c.isMDXComponent=!0},40083:(e,t,n)=>{n.d(t,{Z:()=>l});const l=n.p+"assets/images/redis-insight-connect-ca60cd0282c3a317e5608da154f94867.png"}}]); \ No newline at end of file diff --git a/assets/js/runtime~main.658be35f.js b/assets/js/runtime~main.00b4f70d.js similarity index 97% rename from assets/js/runtime~main.658be35f.js rename to assets/js/runtime~main.00b4f70d.js index 85833c8122..0126f065ed 100644 --- a/assets/js/runtime~main.658be35f.js +++ b/assets/js/runtime~main.00b4f70d.js @@ -1 +1 @@ -(()=>{"use strict";var e,c,d,b,a,f={},r={};function t(e){var c=r[e];if(void 0!==c)return c.exports;var d=r[e]={exports:{}};return f[e].call(d.exports,d,d.exports,t),d.exports}t.m=f,e=[],t.O=(c,d,b,a)=>{if(!d){var f=1/0;for(i=0;i=a)&&Object.keys(t.O).every((e=>t.O[e](d[o])))?d.splice(o--,1):(r=!1,a0&&e[i-1][2]>a;i--)e[i]=e[i-1];e[i]=[d,b,a]},t.n=e=>{var c=e&&e.__esModule?()=>e.default:()=>e;return t.d(c,{a:c}),c},d=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,t.t=function(e,b){if(1&b&&(e=this(e)),8&b)return e;if("object"==typeof 
e&&e){if(4&b&&e.__esModule)return e;if(16&b&&"function"==typeof e.then)return e}var a=Object.create(null);t.r(a);var f={};c=c||[null,d({}),d([]),d(d)];for(var r=2&b&&e;"object"==typeof r&&!~c.indexOf(r);r=d(r))Object.getOwnPropertyNames(r).forEach((c=>f[c]=()=>e[c]));return f.default=()=>e,t.d(a,f),a},t.d=(e,c)=>{for(var d in c)t.o(c,d)&&!t.o(e,d)&&Object.defineProperty(e,d,{enumerable:!0,get:c[d]})},t.f={},t.e=e=>Promise.all(Object.keys(t.f).reduce(((c,d)=>(t.f[d](e,c),c)),[])),t.u=e=>"assets/js/"+({4:"642fde82",37:"4504686e",46:"69c2ba4e",53:"935f2afb",61:"1900cdc0",87:"1dc33e9c",116:"f88d2135",134:"9d90cc60",141:"185a5f78",149:"1e115aba",153:"e133680b",221:"9eeb107e",247:"2f1cd5d9",342:"d661e85d",370:"085ffbd7",409:"1cf06b0d",457:"93148877",459:"c1ab566a",464:"1ef24e58",514:"90f408d1",550:"bf9dc990",559:"bbaff741",637:"fa8b3f88",728:"963a2101",763:"3483a674",779:"732072df",793:"910d84be",821:"45db1732",863:"dfeb6cfd",911:"2bc54e2b",913:"73e252ed",966:"54399a42",1014:"3c066edc",1037:"886ca5ea",1056:"c32ed07a",1059:"64f3f413",1065:"335cbbbb",1067:"cefa0e41",1084:"0c61a2aa",1135:"4cfa8051",1244:"6f05ff58",1261:"3cbbe40c",1280:"710646cd",1284:"b51fdc8c",1298:"5c1a86d3",1309:"872db8be",1317:"098330c0",1321:"7ea171e5",1324:"0b9646e8",1348:"db1d58d4",1356:"899fdd9f",1358:"49623f30",1367:"85d526b8",1380:"21bf2b11",1381:"37c04a16",1389:"5d14e41e",1396:"c3151dc9",1506:"7460fc1d",1567:"51a7d035",1595:"5c3c7026",1601:"55ccde6e",1617:"96c61e5d",1629:"99cb7080",1638:"4843ea13",1672:"2e56b509",1698:"4141cbdd",1701:"e7e99a29",1727:"7f1e28a5",1735:"00d7f298",1743:"c65a5e23",1768:"99eb9860",1819:"81e8711f",1849:"f5985471",1906:"2c06bb79",1924:"c2cefeac",1966:"d3f14484",2066:"97a92876",2115:"f8bde386",2124:"e14e08fc",2163:"0545e01d",2201:"65a1cf65",2206:"d888d60e",2218:"fd0bff62",2250:"f242a466",2295:"31ead50b",2302:"3bb72ea1",2341:"9daaef4f",2376:"7d0548cb",2399:"70a5ec54",2421:"169d51e4",2451:"8632c9a0",2525:"163f4d81",2536:"c98631ab",2551:"6b968562",2600:"c8a0be4d",2626:"099d618a",2649:"f8c0f22b",2659:"730e35c4",2684:"ad08431a",2801:"470c62dd",2841:"e7fe152c",2898:"e1816353",2914:"feb7b0b4",2925:"94686630",2930:"b949b843",2970:"df84b43c",3007:"af72bfd1",3015:"129ce714",3019:"8083ca96",3104:"4d561b09",3106:"b0c5f580",3108:"2c83c4e0",3114:"552ba15d",3156:"9d845a85",3165:"b88966c9",3171:"d119b4b9",3186:"6b006b96",3237:"1df93b7f",3294:"0c264ecc",3334:"3962b4b6",3335:"f19dd2c1",3375:"fb01d865",3451:"6410f0dc",3459:"7654669a",3527:"b4c6035f",3551:"71898787",3569:"74ac41dd",3575:"dce58c9e",3610:"584963dc",3659:"994c0519",3721:"aa02f6e6",3723:"e1c2ddaa",3743:"e9b9d3de",3751:"3720c009",3752:"c1e8360a",3762:"2d271c04",3789:"f1811081",3790:"98d604d5",3814:"b8f4daa2",3820:"f23cc535",3878:"c551111a",3891:"e1d45d33",3892:"81b2d599",3901:"265284fc",3984:"fe9a220a",4001:"45c1b0fa",4015:"d329623b",4074:"0ec1cc91",4089:"ebafc251",4092:"0c9e8015",4102:"bbc4d579",4121:"55960ee5",4139:"076ede8c",4190:"aa630107",4195:"14612fe5",4226:"1ecc9877",4230:"e65f8228",4261:"db91b37d",4275:"329b0232",4328:"ba6f6003",4361:"e1610694",4479:"b3b34ca6",4497:"5c25e952",4506:"879b2dca",4518:"6e373ae3",4597:"64867942",4641:"1d1b3f81",4670:"a9a3a51c",4698:"29eff2aa",4705:"106fc9f0",4717:"932ef172",4788:"2cc2cea1",4802:"9c28af5e",4803:"20ee7761",4821:"a8be125d",4845:"d63023de",4850:"c6304617",4931:"7f823891",4936:"6c272831",4949:"fb3c2ee3",4968:"47f30d1f",4974:"6980e26f",4985:"34bbed6c",5037:"e7f62945",5044:"9f69dad8",5047:"1ff98f28",5050:"e8e1f04a",5100:"a5b41932",5114:"4c603040",5146:"28b9548b",5188:"b061d76e",5201:"31350f16",5290:"b400c9cd",
5307:"b3fefca0",5312:"1341f4ef",5426:"51ede774",5439:"f57bd6c4",5492:"ada40f98",5500:"c414f47b",5516:"18edca16",5550:"fba0c1ab",5599:"5a6c6630",5615:"23200c1b",5653:"03506951",5658:"3d95878d",5667:"1ab3d3f0",5683:"a5c3d9e9",5745:"d866cdfa",5784:"110bbf9d",5805:"d3555a77",5815:"165403c2",5853:"b0c8cd2e",5867:"f826be22",5870:"4b53257e",5928:"a3dbe69d",5937:"9292d398",5979:"46c09b1c",5980:"c1bb9856",5992:"f7773721",5998:"b2c8b850",6023:"eed077d2",6045:"d7f02a39",6047:"12853b3b",6060:"4be55f35",6082:"ccdc297e",6106:"eb3f2693",6123:"c554cc93",6154:"19987802",6170:"9a2b95c5",6202:"862d5cd0",6205:"1282e92b",6216:"6c249fdd",6221:"434536ad",6347:"16138ba5",6365:"2d198a95",6425:"18050ae3",6470:"c5c6e5e3",6487:"eaf8982a",6502:"13ca8dc8",6517:"84711cce",6553:"2601a326",6584:"e83211e5",6593:"a3302592",6615:"50b6ea97",6657:"965be803",6667:"0c306a96",6709:"2123ba4c",6754:"13155904",6779:"4c591bd5",6835:"9e5bb348",6919:"8ec5e180",6946:"6b6fd196",6972:"07a6fe21",6973:"214a573e",6980:"e49a9233",7005:"4e18a229",7040:"531e7017",7119:"fd375cb3",7128:"eb92c419",7269:"9f54a902",7271:"65be7fcc",7292:"8fea5263",7338:"613fe82a",7367:"aa68f0e9",7400:"4c958ddc",7422:"44556e56",7428:"e2919432",7459:"5b944e4b",7466:"46b3dd76",7470:"5450f727",7502:"3b0e0d3a",7530:"d9fa573c",7599:"cd548de4",7603:"1cc9ce96",7607:"1756b4ab",7615:"bfe77eb6",7642:"90e92e2d",7644:"bc14dfd3",7676:"ba498387",7678:"d9a0a71f",7682:"fd091685",7719:"2b69ed49",7729:"ddd418a4",7733:"16a31933",7809:"2cac92cf",7811:"8d1d1c2e",7813:"8632df87",7826:"7a460b64",7846:"d439d214",7880:"bda60f59",7894:"ed35ad37",7903:"59bac03b",7918:"17896441",7919:"78895142",7982:"8541537e",7992:"0abd8cd0",8003:"8460a6e0",8006:"53b913eb",8087:"405b4698",8145:"83a556d4",8252:"d69fc27c",8255:"dc17824a",8314:"18719bcf",8338:"223019eb",8345:"82b6bb4d",8359:"3b168288",8361:"0f0e7497",8403:"2b53ff98",8413:"c978831f",8440:"a33ab7b5",8443:"806d421f",8454:"f9ed4a2e",8464:"b59188bc",8468:"24585ef6",8477:"f251a97b",8520:"0edeee5a",8559:"a69121a9",8560:"f7282e3d",8562:"c42ebdb1",8563:"b697db3c",8643:"f2d3ddf1",8681:"9a2aa159",8712:"c3e7abab",8737:"412a9523",8760:"389b810a",8849:"c34f2f42",8851:"5ef34981",8855:"b67b88ed",8879:"3d36e868",8922:"0b7ca231",8924:"cc1ddd8c",8950:"ba3bf6d5",8962:"cb3ee170",8969:"5420bc6f",8986:"a55e4082",8997:"4c484ee6",9008:"ab704c68",9012:"f3dbe95e",9023:"651422d5",9032:"27dc3412",9045:"94f8b490",9047:"84ad0742",9074:"4393e4bc",9083:"844f5f7b",9103:"b6e690a8",9156:"70771f47",9184:"9918870c",9190:"9d037edf",9231:"4458e1ed",9290:"a6e0ba43",9342:"5b01a645",9348:"0ca7b406",9353:"b70c40d3",9355:"d5484ed9",9392:"ab39e009",9398:"eab4b0fe",9405:"344dca25",9461:"47016385",9499:"2e86cab0",9514:"1be78505",9528:"700fba64",9546:"a1e09814",9566:"e1a3fa89",9572:"7a1dcf5b",9589:"95a8df98",9590:"88c087fa",9605:"81fab8ae",9627:"eaf6f8ac",9636:"0776bfb3",9677:"67747c0c",9688:"e368a9fb",9743:"0fdc98ff",9769:"f4457846",9774:"42f12bbb",9783:"11166ec1",9884:"92dce1fb",9924:"df203c0f",9933:"16ae7048",9948:"531549fe",9966:"c9c86724",9993:"0566ee4e"}[e]||e)+"."+{4:"4e9a5cf9",37:"d0c0ee9a",46:"ee8a0396",53:"db3fa784",61:"a77abad9",87:"615b8dd3",116:"d88674bc",134:"1484cdb7",141:"9fc44a3a",149:"b0d3139a",153:"b83a2475",221:"bb3028d5",247:"5db2e7c5",342:"4b622c56",370:"e238594b",409:"e05c44db",457:"d201e113",459:"17ea8335",464:"c3e3948c",486:"e1f777fb",514:"116764bd",550:"263f2266",559:"e300b666",637:"04767ddc",728:"ff89c437",763:"9eb891ab",779:"c4903e22",793:"7ec39821",821:"50e8647c",863:"b839bc68",911:"34bfcbb8",913:"f1c452f7",966:"c6c7a27a",1014:"0a1bf1be",1037:"868865cd",1056:"2e85c605
",1059:"8316bb33",1065:"8346d600",1067:"c13c8d82",1084:"40a14955",1135:"c4794792",1244:"d17722f8",1261:"200b36b8",1280:"3459eb36",1284:"ac9d7105",1298:"e1932ddf",1309:"8ce2cc70",1317:"8fc6a14e",1321:"8677b0e2",1324:"2eeb6054",1348:"852747df",1356:"58bab6f5",1358:"389df322",1367:"4f52e2c1",1380:"774977b9",1381:"46751a68",1389:"abe02502",1396:"304f611b",1506:"86d13f08",1567:"056a69b4",1595:"4aec0d98",1601:"4739d856",1617:"cab302d2",1629:"e89bf41c",1638:"21a32d60",1672:"670bff35",1698:"e8e5f480",1701:"496e2f5c",1727:"aa5304bb",1735:"df84af31",1743:"86f5d431",1768:"7d6c1213",1819:"cee12f16",1849:"db81de9d",1906:"9f28cc2a",1924:"c577c0d9",1966:"0cbc9668",2066:"c3b23be6",2115:"a58d0dbc",2124:"f14034a8",2163:"3e1e62ca",2201:"8d148709",2206:"84fb8e56",2218:"beff1ed1",2250:"d9da0a6b",2295:"4ab6eed6",2302:"0e0d5e90",2341:"45273b48",2376:"990533ff",2399:"931b3631",2421:"20c32a23",2451:"5c5ad60e",2525:"b4127711",2536:"e3dc6a03",2551:"dd1b8c40",2600:"479a6b44",2626:"fcfd7367",2649:"5ef379ab",2659:"22bd81ef",2684:"4864a6cb",2801:"2f86505e",2841:"94eb25b0",2898:"8bef115d",2914:"01807acf",2925:"935697c2",2930:"a85eac6d",2970:"5655a6d5",3007:"24045c19",3015:"73c7f255",3019:"b2e60a70",3104:"a3e63893",3106:"00ea1bab",3108:"178fcfeb",3114:"4b111030",3156:"4ad87429",3165:"cc1265f7",3171:"c7342b03",3186:"24413443",3237:"4b1dff54",3294:"9f25e2b2",3334:"9d00ae76",3335:"25e17ff8",3375:"e5853741",3451:"480ac28c",3459:"3eb76891",3527:"58706e60",3551:"d0fc83fd",3569:"ec75ea31",3575:"cd328d6b",3610:"6fcaf4e3",3614:"73cf28ce",3659:"9829f217",3721:"6e0779b7",3723:"278993e4",3743:"1a035772",3751:"f2161fcd",3752:"7c3fe38f",3762:"d43618bf",3789:"04b45f76",3790:"0d4de16f",3814:"00465c0b",3820:"6e1ab86e",3878:"610c35d1",3891:"f72a5735",3892:"ada90aed",3901:"834725f1",3941:"2712e828",3984:"08f8682b",4001:"a0934960",4015:"6b26d853",4074:"dc1e8ab4",4089:"bcf41329",4092:"9986218e",4102:"86396385",4121:"cd5b5b3d",4139:"382a2477",4190:"17ac9ef1",4195:"98eaf8b4",4226:"21a1a5a7",4230:"29273046",4261:"258dc27f",4275:"97950d5a",4328:"3c7fe011",4361:"63327a55",4479:"2fdf126a",4497:"4f86a8cd",4506:"0b055596",4518:"e078b181",4597:"3da8bb1f",4641:"9361363c",4670:"fd197a31",4698:"33cac92c",4705:"72a06f20",4717:"68813286",4788:"c461f876",4802:"d1361500",4803:"4b77a207",4821:"a593b19a",4845:"07da2c57",4850:"5f022655",4931:"138d3cc2",4936:"f4446c95",4949:"0ec33d3f",4968:"7677aed5",4974:"6744cb79",4985:"f6322072",5037:"a5fbff25",5044:"8b6ab0e9",5047:"742cff2c",5050:"ee4d34a2",5100:"ae4efdb7",5114:"53f7f6ed",5146:"ce7a1a3f",5188:"fe5f1b68",5201:"5cd52e38",5290:"5242cdc1",5307:"cd754b0f",5312:"9fc5d72f",5426:"13d9575d",5439:"e6d75562",5492:"23491c44",5500:"ad106193",5516:"68bc200c",5550:"3c942df0",5599:"42ce2c0e",5615:"7c22b5ad",5653:"fffdcf8c",5658:"4209df99",5667:"5f621236",5683:"88e72369",5745:"1eec78d1",5784:"33269912",5805:"dcfadf8a",5815:"89e39bb2",5853:"3f8abe98",5867:"655806ca",5870:"798f6c81",5928:"e978628a",5937:"e78269be",5979:"197dc2c9",5980:"226bb5e1",5992:"41011fdc",5998:"ac6136bc",6023:"a1506d52",6045:"6ce347a1",6047:"ec3a843a",6060:"ae10ed8b",6066:"c78f7afc",6082:"27d28384",6106:"eebe35b5",6123:"fa5f0eeb",6154:"4acd61bf",6170:"91212703",6202:"a38d821d",6205:"e3131514",6216:"5ebdd8a0",6221:"a26cfee2",6347:"15eb26a9",6365:"42453980",6425:"572fcc59",6470:"710da58a",6487:"ba9eafae",6502:"a648e4e7",6517:"bd2d99fa",6553:"4e8a8e33",6584:"df33141b",6593:"4a5371fc",6615:"e13825b5",6657:"4bdbf84c",6667:"2fea1bac",6709:"ef6cdf69",6754:"42bf3974",6779:"8f108577",6835:"ef10f950",6919:"7696289d",6946:"359023f6",6972:"79cff386",6973:"1c7a3e5e",6
980:"d1430428",7005:"f5ef9cde",7040:"42a1439a",7119:"cdc5bae9",7128:"1827468e",7269:"519e03cf",7271:"e87f977e",7292:"440c7a8f",7338:"fbfcfbdd",7367:"d38a5a68",7400:"f86e26cd",7422:"d9cc4236",7428:"fae137e5",7459:"893f6d83",7466:"392273b9",7470:"90217184",7502:"e8cba6bf",7530:"f13da92a",7599:"e93defab",7603:"ab851359",7607:"cab935d5",7615:"3dbe520b",7642:"a614429f",7644:"f500f40c",7676:"e503482c",7678:"41ad8dbf",7682:"9efffa84",7719:"57721fbf",7729:"7dd57155",7733:"39e1db79",7809:"be4fcdc4",7811:"02da5a1f",7813:"c7338115",7826:"01afc633",7846:"5be02699",7880:"95b22506",7894:"aeeda7b1",7903:"ff68a5db",7918:"15777b09",7919:"8f492131",7982:"beab6265",7992:"9d60d6f4",8003:"06a3b010",8006:"96902377",8087:"c68bc9d2",8145:"3ceec6e0",8252:"6b5bfb43",8255:"31ef19d5",8314:"80673733",8338:"e0cb42b2",8345:"194edf9f",8359:"de2d826f",8361:"a248ce8b",8403:"e3b9b680",8413:"f2efb99f",8440:"2bc1d91e",8443:"7e816643",8454:"a5c12fe6",8464:"294ce184",8468:"efa6971b",8477:"621fe574",8520:"d9580897",8559:"200c71a0",8560:"cfa9b65b",8562:"ee22c58f",8563:"d71587d0",8643:"600d2692",8681:"4ed3477a",8712:"83f2098b",8737:"f5a2f40b",8760:"bcd65bbf",8849:"a79209de",8851:"9f30f3bd",8855:"34338e5b",8879:"4a107983",8922:"eb31162e",8924:"f9c7caed",8950:"b4dc5bc4",8962:"5e8cc09c",8969:"e08386b2",8986:"fafd6e9b",8997:"f9098a98",9008:"3a3236cf",9012:"dddefdb9",9023:"ed20f1c0",9032:"626fbd8d",9045:"ab3ddf7d",9047:"c8a1eef8",9074:"c4d79b00",9083:"0053268a",9103:"70308f71",9156:"b017c60f",9184:"6591c182",9190:"6e82c14f",9231:"ae5d31f2",9290:"c3097b34",9342:"101a6b44",9348:"919c432c",9353:"af0421a4",9355:"9dab3496",9392:"2ce7bd64",9398:"09b1848e",9405:"ad2fb4fa",9461:"a14ffebf",9499:"10c33b72",9514:"550fc046",9528:"10e72a97",9546:"85e3130f",9566:"0bf155e1",9572:"bac2a656",9589:"9203f42d",9590:"627f10c1",9605:"afd8e52c",9627:"e4163fc8",9636:"281dcb57",9677:"3b504a1c",9688:"878c8e9a",9743:"aad68954",9769:"2ac0d29b",9774:"f24c9567",9783:"4fa12fee",9884:"4dff70b9",9924:"5f4bb7b3",9933:"4f71a059",9948:"9af8a21d",9966:"db1d990c",9993:"9efb00bc"}[e]+".js",t.miniCssF=e=>{},t.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),t.o=(e,c)=>Object.prototype.hasOwnProperty.call(e,c),b={},a="redis-developer-hub:",t.l=(e,c,d,f)=>{if(b[e])b[e].push(c);else{var r,o;if(void 0!==d)for(var n=document.getElementsByTagName("script"),i=0;i{r.onerror=r.onload=null,clearTimeout(s);var a=b[e];if(delete b[e],r.parentNode&&r.parentNode.removeChild(r),a&&a.forEach((e=>e(d))),c)return c(d)},s=setTimeout(l.bind(null,void 0,{type:"timeout",target:r}),12e4);r.onerror=l.bind(null,r.onerror),r.onload=l.bind(null,r.onload),o&&document.head.appendChild(r)}},t.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},t.p="/",t.gca=function(e){return 
e={13155904:"6754",17896441:"7918",19987802:"6154",47016385:"9461",64867942:"4597",71898787:"3551",78895142:"7919",93148877:"457",94686630:"2925","642fde82":"4","4504686e":"37","69c2ba4e":"46","935f2afb":"53","1900cdc0":"61","1dc33e9c":"87",f88d2135:"116","9d90cc60":"134","185a5f78":"141","1e115aba":"149",e133680b:"153","9eeb107e":"221","2f1cd5d9":"247",d661e85d:"342","085ffbd7":"370","1cf06b0d":"409",c1ab566a:"459","1ef24e58":"464","90f408d1":"514",bf9dc990:"550",bbaff741:"559",fa8b3f88:"637","963a2101":"728","3483a674":"763","732072df":"779","910d84be":"793","45db1732":"821",dfeb6cfd:"863","2bc54e2b":"911","73e252ed":"913","54399a42":"966","3c066edc":"1014","886ca5ea":"1037",c32ed07a:"1056","64f3f413":"1059","335cbbbb":"1065",cefa0e41:"1067","0c61a2aa":"1084","4cfa8051":"1135","6f05ff58":"1244","3cbbe40c":"1261","710646cd":"1280",b51fdc8c:"1284","5c1a86d3":"1298","872db8be":"1309","098330c0":"1317","7ea171e5":"1321","0b9646e8":"1324",db1d58d4:"1348","899fdd9f":"1356","49623f30":"1358","85d526b8":"1367","21bf2b11":"1380","37c04a16":"1381","5d14e41e":"1389",c3151dc9:"1396","7460fc1d":"1506","51a7d035":"1567","5c3c7026":"1595","55ccde6e":"1601","96c61e5d":"1617","99cb7080":"1629","4843ea13":"1638","2e56b509":"1672","4141cbdd":"1698",e7e99a29:"1701","7f1e28a5":"1727","00d7f298":"1735",c65a5e23:"1743","99eb9860":"1768","81e8711f":"1819",f5985471:"1849","2c06bb79":"1906",c2cefeac:"1924",d3f14484:"1966","97a92876":"2066",f8bde386:"2115",e14e08fc:"2124","0545e01d":"2163","65a1cf65":"2201",d888d60e:"2206",fd0bff62:"2218",f242a466:"2250","31ead50b":"2295","3bb72ea1":"2302","9daaef4f":"2341","7d0548cb":"2376","70a5ec54":"2399","169d51e4":"2421","8632c9a0":"2451","163f4d81":"2525",c98631ab:"2536","6b968562":"2551",c8a0be4d:"2600","099d618a":"2626",f8c0f22b:"2649","730e35c4":"2659",ad08431a:"2684","470c62dd":"2801",e7fe152c:"2841",e1816353:"2898",feb7b0b4:"2914",b949b843:"2930",df84b43c:"2970",af72bfd1:"3007","129ce714":"3015","8083ca96":"3019","4d561b09":"3104",b0c5f580:"3106","2c83c4e0":"3108","552ba15d":"3114","9d845a85":"3156",b88966c9:"3165",d119b4b9:"3171","6b006b96":"3186","1df93b7f":"3237","0c264ecc":"3294","3962b4b6":"3334",f19dd2c1:"3335",fb01d865:"3375","6410f0dc":"3451","7654669a":"3459",b4c6035f:"3527","74ac41dd":"3569",dce58c9e:"3575","584963dc":"3610","994c0519":"3659",aa02f6e6:"3721",e1c2ddaa:"3723",e9b9d3de:"3743","3720c009":"3751",c1e8360a:"3752","2d271c04":"3762",f1811081:"3789","98d604d5":"3790",b8f4daa2:"3814",f23cc535:"3820",c551111a:"3878",e1d45d33:"3891","81b2d599":"3892","265284fc":"3901",fe9a220a:"3984","45c1b0fa":"4001",d329623b:"4015","0ec1cc91":"4074",ebafc251:"4089","0c9e8015":"4092",bbc4d579:"4102","55960ee5":"4121","076ede8c":"4139",aa630107:"4190","14612fe5":"4195","1ecc9877":"4226",e65f8228:"4230",db91b37d:"4261","329b0232":"4275",ba6f6003:"4328",e1610694:"4361",b3b34ca6:"4479","5c25e952":"4497","879b2dca":"4506","6e373ae3":"4518","1d1b3f81":"4641",a9a3a51c:"4670","29eff2aa":"4698","106fc9f0":"4705","932ef172":"4717","2cc2cea1":"4788","9c28af5e":"4802","20ee7761":"4803",a8be125d:"4821",d63023de:"4845",c6304617:"4850","7f823891":"4931","6c272831":"4936",fb3c2ee3:"4949","47f30d1f":"4968","6980e26f":"4974","34bbed6c":"4985",e7f62945:"5037","9f69dad8":"5044","1ff98f28":"5047",e8e1f04a:"5050",a5b41932:"5100","4c603040":"5114","28b9548b":"5146",b061d76e:"5188","31350f16":"5201",b400c9cd:"5290",b3fefca0:"5307","1341f4ef":"5312","51ede774":"5426",f57bd6c4:"5439",ada40f98:"5492",c414f47b:"5500","18edca16":"5516",fba0c1ab:"5550","5a6c6630":"5599","23200c1b":"5615","03506951":"5
653","3d95878d":"5658","1ab3d3f0":"5667",a5c3d9e9:"5683",d866cdfa:"5745","110bbf9d":"5784",d3555a77:"5805","165403c2":"5815",b0c8cd2e:"5853",f826be22:"5867","4b53257e":"5870",a3dbe69d:"5928","9292d398":"5937","46c09b1c":"5979",c1bb9856:"5980",f7773721:"5992",b2c8b850:"5998",eed077d2:"6023",d7f02a39:"6045","12853b3b":"6047","4be55f35":"6060",ccdc297e:"6082",eb3f2693:"6106",c554cc93:"6123","9a2b95c5":"6170","862d5cd0":"6202","1282e92b":"6205","6c249fdd":"6216","434536ad":"6221","16138ba5":"6347","2d198a95":"6365","18050ae3":"6425",c5c6e5e3:"6470",eaf8982a:"6487","13ca8dc8":"6502","84711cce":"6517","2601a326":"6553",e83211e5:"6584",a3302592:"6593","50b6ea97":"6615","965be803":"6657","0c306a96":"6667","2123ba4c":"6709","4c591bd5":"6779","9e5bb348":"6835","8ec5e180":"6919","6b6fd196":"6946","07a6fe21":"6972","214a573e":"6973",e49a9233:"6980","4e18a229":"7005","531e7017":"7040",fd375cb3:"7119",eb92c419:"7128","9f54a902":"7269","65be7fcc":"7271","8fea5263":"7292","613fe82a":"7338",aa68f0e9:"7367","4c958ddc":"7400","44556e56":"7422",e2919432:"7428","5b944e4b":"7459","46b3dd76":"7466","5450f727":"7470","3b0e0d3a":"7502",d9fa573c:"7530",cd548de4:"7599","1cc9ce96":"7603","1756b4ab":"7607",bfe77eb6:"7615","90e92e2d":"7642",bc14dfd3:"7644",ba498387:"7676",d9a0a71f:"7678",fd091685:"7682","2b69ed49":"7719",ddd418a4:"7729","16a31933":"7733","2cac92cf":"7809","8d1d1c2e":"7811","8632df87":"7813","7a460b64":"7826",d439d214:"7846",bda60f59:"7880",ed35ad37:"7894","59bac03b":"7903","8541537e":"7982","0abd8cd0":"7992","8460a6e0":"8003","53b913eb":"8006","405b4698":"8087","83a556d4":"8145",d69fc27c:"8252",dc17824a:"8255","18719bcf":"8314","223019eb":"8338","82b6bb4d":"8345","3b168288":"8359","0f0e7497":"8361","2b53ff98":"8403",c978831f:"8413",a33ab7b5:"8440","806d421f":"8443",f9ed4a2e:"8454",b59188bc:"8464","24585ef6":"8468",f251a97b:"8477","0edeee5a":"8520",a69121a9:"8559",f7282e3d:"8560",c42ebdb1:"8562",b697db3c:"8563",f2d3ddf1:"8643","9a2aa159":"8681",c3e7abab:"8712","412a9523":"8737","389b810a":"8760",c34f2f42:"8849","5ef34981":"8851",b67b88ed:"8855","3d36e868":"8879","0b7ca231":"8922",cc1ddd8c:"8924",ba3bf6d5:"8950",cb3ee170:"8962","5420bc6f":"8969",a55e4082:"8986","4c484ee6":"8997",ab704c68:"9008",f3dbe95e:"9012","651422d5":"9023","27dc3412":"9032","94f8b490":"9045","84ad0742":"9047","4393e4bc":"9074","844f5f7b":"9083",b6e690a8:"9103","70771f47":"9156","9918870c":"9184","9d037edf":"9190","4458e1ed":"9231",a6e0ba43:"9290","5b01a645":"9342","0ca7b406":"9348",b70c40d3:"9353",d5484ed9:"9355",ab39e009:"9392",eab4b0fe:"9398","344dca25":"9405","2e86cab0":"9499","1be78505":"9514","700fba64":"9528",a1e09814:"9546",e1a3fa89:"9566","7a1dcf5b":"9572","95a8df98":"9589","88c087fa":"9590","81fab8ae":"9605",eaf6f8ac:"9627","0776bfb3":"9636","67747c0c":"9677",e368a9fb:"9688","0fdc98ff":"9743",f4457846:"9769","42f12bbb":"9774","11166ec1":"9783","92dce1fb":"9884",df203c0f:"9924","16ae7048":"9933","531549fe":"9948",c9c86724:"9966","0566ee4e":"9993"}[e]||e,t.p+t.u(e)},(()=>{var e={1303:0,532:0};t.f.j=(c,d)=>{var b=t.o(e,c)?e[c]:void 0;if(0!==b)if(b)d.push(b[2]);else if(/^(1303|532)$/.test(c))e[c]=0;else{var a=new Promise(((d,a)=>b=e[c]=[d,a]));d.push(b[2]=a);var f=t.p+t.u(c),r=new Error;t.l(f,(d=>{if(t.o(e,c)&&(0!==(b=e[c])&&(e[c]=void 0),b)){var a=d&&("load"===d.type?"missing":d.type),f=d&&d.target&&d.target.src;r.message="Loading chunk "+c+" failed.\n("+a+": "+f+")",r.name="ChunkLoadError",r.type=a,r.request=f,b[1](r)}}),"chunk-"+c,c)}},t.O.j=c=>0===e[c];var c=(c,d)=>{var 
b,a,f=d[0],r=d[1],o=d[2],n=0;if(f.some((c=>0!==e[c]))){for(b in r)t.o(r,b)&&(t.m[b]=r[b]);if(o)var i=o(t)}for(c&&c(d);n{"use strict";var e,c,d,b,a,f={},r={};function t(e){var c=r[e];if(void 0!==c)return c.exports;var d=r[e]={exports:{}};return f[e].call(d.exports,d,d.exports,t),d.exports}t.m=f,e=[],t.O=(c,d,b,a)=>{if(!d){var f=1/0;for(i=0;i=a)&&Object.keys(t.O).every((e=>t.O[e](d[o])))?d.splice(o--,1):(r=!1,a0&&e[i-1][2]>a;i--)e[i]=e[i-1];e[i]=[d,b,a]},t.n=e=>{var c=e&&e.__esModule?()=>e.default:()=>e;return t.d(c,{a:c}),c},d=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,t.t=function(e,b){if(1&b&&(e=this(e)),8&b)return e;if("object"==typeof e&&e){if(4&b&&e.__esModule)return e;if(16&b&&"function"==typeof e.then)return e}var a=Object.create(null);t.r(a);var f={};c=c||[null,d({}),d([]),d(d)];for(var r=2&b&&e;"object"==typeof r&&!~c.indexOf(r);r=d(r))Object.getOwnPropertyNames(r).forEach((c=>f[c]=()=>e[c]));return f.default=()=>e,t.d(a,f),a},t.d=(e,c)=>{for(var d in c)t.o(c,d)&&!t.o(e,d)&&Object.defineProperty(e,d,{enumerable:!0,get:c[d]})},t.f={},t.e=e=>Promise.all(Object.keys(t.f).reduce(((c,d)=>(t.f[d](e,c),c)),[])),t.u=e=>"assets/js/"+({4:"642fde82",37:"4504686e",46:"69c2ba4e",53:"935f2afb",61:"1900cdc0",87:"1dc33e9c",116:"f88d2135",134:"9d90cc60",141:"185a5f78",149:"1e115aba",153:"e133680b",221:"9eeb107e",247:"2f1cd5d9",342:"d661e85d",370:"085ffbd7",409:"1cf06b0d",457:"93148877",459:"c1ab566a",464:"1ef24e58",514:"90f408d1",550:"bf9dc990",559:"bbaff741",637:"fa8b3f88",728:"963a2101",763:"3483a674",779:"732072df",793:"910d84be",821:"45db1732",863:"dfeb6cfd",911:"2bc54e2b",913:"73e252ed",966:"54399a42",1014:"3c066edc",1037:"886ca5ea",1056:"c32ed07a",1059:"64f3f413",1065:"335cbbbb",1067:"cefa0e41",1084:"0c61a2aa",1135:"4cfa8051",1244:"6f05ff58",1261:"3cbbe40c",1280:"710646cd",1284:"b51fdc8c",1298:"5c1a86d3",1309:"872db8be",1317:"098330c0",1321:"7ea171e5",1324:"0b9646e8",1348:"db1d58d4",1356:"899fdd9f",1358:"49623f30",1367:"85d526b8",1380:"21bf2b11",1381:"37c04a16",1389:"5d14e41e",1396:"c3151dc9",1506:"7460fc1d",1567:"51a7d035",1595:"5c3c7026",1601:"55ccde6e",1617:"96c61e5d",1629:"99cb7080",1638:"4843ea13",1672:"2e56b509",1698:"4141cbdd",1701:"e7e99a29",1727:"7f1e28a5",1735:"00d7f298",1743:"c65a5e23",1768:"99eb9860",1819:"81e8711f",1849:"f5985471",1906:"2c06bb79",1924:"c2cefeac",1966:"d3f14484",2066:"97a92876",2115:"f8bde386",2124:"e14e08fc",2163:"0545e01d",2201:"65a1cf65",2206:"d888d60e",2218:"fd0bff62",2250:"f242a466",2295:"31ead50b",2302:"3bb72ea1",2341:"9daaef4f",2376:"7d0548cb",2399:"70a5ec54",2421:"169d51e4",2451:"8632c9a0",2525:"163f4d81",2536:"c98631ab",2551:"6b968562",2600:"c8a0be4d",2626:"099d618a",2649:"f8c0f22b",2659:"730e35c4",2684:"ad08431a",2801:"470c62dd",2841:"e7fe152c",2898:"e1816353",2914:"feb7b0b4",2925:"94686630",2930:"b949b843",2970:"df84b43c",3007:"af72bfd1",3015:"129ce714",3019:"8083ca96",3104:"4d561b09",3106:"b0c5f580",3108:"2c83c4e0",3114:"552ba15d",3156:"9d845a85",3165:"b88966c9",3171:"d119b4b9",3186:"6b006b96",3237:"1df93b7f",3294:"0c264ecc",3334:"3962b4b6",3335:"f19dd2c1",3375:"fb01d865",3451:"6410f0dc",3459:"7654669a",3527:"b4c6035f",3551:"71898787",3569:"74ac41dd",3575:"dce58c9e",3610:"584963dc",3659:"994c0519",3721:"aa02f6e6",3723:"e1c2ddaa",3743:"e9b9d3de",3751:"3720c009",3752:"c1e8360a",3762:"2d271c04",3789:"f1811081",3790:"98d604d5",3814:"b8f4daa2",3820:"f23cc535",3878:"c551111a",3891:"e1d45d33",3892:"81b2d599",3901:"265284fc",3984:"fe9a220a",4001:"45c1b0fa",4015:"d329623b",4074:"0ec1cc91",4089:"ebafc251",4092:"0c9e8015",4102:"bbc4d579",4121
:"55960ee5",4139:"076ede8c",4190:"aa630107",4195:"14612fe5",4226:"1ecc9877",4230:"e65f8228",4261:"db91b37d",4275:"329b0232",4328:"ba6f6003",4361:"e1610694",4479:"b3b34ca6",4497:"5c25e952",4506:"879b2dca",4518:"6e373ae3",4597:"64867942",4641:"1d1b3f81",4670:"a9a3a51c",4698:"29eff2aa",4705:"106fc9f0",4717:"932ef172",4788:"2cc2cea1",4802:"9c28af5e",4803:"20ee7761",4821:"a8be125d",4845:"d63023de",4850:"c6304617",4931:"7f823891",4936:"6c272831",4949:"fb3c2ee3",4968:"47f30d1f",4974:"6980e26f",4985:"34bbed6c",5037:"e7f62945",5044:"9f69dad8",5047:"1ff98f28",5050:"e8e1f04a",5100:"a5b41932",5114:"4c603040",5146:"28b9548b",5188:"b061d76e",5201:"31350f16",5290:"b400c9cd",5307:"b3fefca0",5312:"1341f4ef",5426:"51ede774",5439:"f57bd6c4",5492:"ada40f98",5500:"c414f47b",5516:"18edca16",5550:"fba0c1ab",5599:"5a6c6630",5615:"23200c1b",5653:"03506951",5658:"3d95878d",5667:"1ab3d3f0",5683:"a5c3d9e9",5745:"d866cdfa",5784:"110bbf9d",5805:"d3555a77",5815:"165403c2",5853:"b0c8cd2e",5867:"f826be22",5870:"4b53257e",5928:"a3dbe69d",5937:"9292d398",5979:"46c09b1c",5980:"c1bb9856",5992:"f7773721",5998:"b2c8b850",6023:"eed077d2",6045:"d7f02a39",6047:"12853b3b",6060:"4be55f35",6082:"ccdc297e",6106:"eb3f2693",6123:"c554cc93",6154:"19987802",6170:"9a2b95c5",6202:"862d5cd0",6205:"1282e92b",6216:"6c249fdd",6221:"434536ad",6347:"16138ba5",6365:"2d198a95",6425:"18050ae3",6470:"c5c6e5e3",6487:"eaf8982a",6502:"13ca8dc8",6517:"84711cce",6553:"2601a326",6584:"e83211e5",6593:"a3302592",6615:"50b6ea97",6657:"965be803",6667:"0c306a96",6709:"2123ba4c",6754:"13155904",6779:"4c591bd5",6835:"9e5bb348",6919:"8ec5e180",6946:"6b6fd196",6972:"07a6fe21",6973:"214a573e",6980:"e49a9233",7005:"4e18a229",7040:"531e7017",7119:"fd375cb3",7128:"eb92c419",7269:"9f54a902",7271:"65be7fcc",7292:"8fea5263",7338:"613fe82a",7367:"aa68f0e9",7400:"4c958ddc",7422:"44556e56",7428:"e2919432",7459:"5b944e4b",7466:"46b3dd76",7470:"5450f727",7502:"3b0e0d3a",7530:"d9fa573c",7599:"cd548de4",7603:"1cc9ce96",7607:"1756b4ab",7615:"bfe77eb6",7642:"90e92e2d",7644:"bc14dfd3",7676:"ba498387",7678:"d9a0a71f",7682:"fd091685",7719:"2b69ed49",7729:"ddd418a4",7733:"16a31933",7809:"2cac92cf",7811:"8d1d1c2e",7813:"8632df87",7826:"7a460b64",7846:"d439d214",7880:"bda60f59",7894:"ed35ad37",7903:"59bac03b",7918:"17896441",7919:"78895142",7982:"8541537e",7992:"0abd8cd0",8003:"8460a6e0",8006:"53b913eb",8087:"405b4698",8145:"83a556d4",8252:"d69fc27c",8255:"dc17824a",8314:"18719bcf",8338:"223019eb",8345:"82b6bb4d",8359:"3b168288",8361:"0f0e7497",8403:"2b53ff98",8413:"c978831f",8440:"a33ab7b5",8443:"806d421f",8454:"f9ed4a2e",8464:"b59188bc",8468:"24585ef6",8477:"f251a97b",8520:"0edeee5a",8559:"a69121a9",8560:"f7282e3d",8562:"c42ebdb1",8563:"b697db3c",8643:"f2d3ddf1",8681:"9a2aa159",8712:"c3e7abab",8737:"412a9523",8760:"389b810a",8849:"c34f2f42",8851:"5ef34981",8855:"b67b88ed",8879:"3d36e868",8922:"0b7ca231",8924:"cc1ddd8c",8950:"ba3bf6d5",8962:"cb3ee170",8969:"5420bc6f",8986:"a55e4082",8997:"4c484ee6",9008:"ab704c68",9012:"f3dbe95e",9023:"651422d5",9032:"27dc3412",9045:"94f8b490",9047:"84ad0742",9074:"4393e4bc",9083:"844f5f7b",9103:"b6e690a8",9156:"70771f47",9184:"9918870c",9190:"9d037edf",9231:"4458e1ed",9290:"a6e0ba43",9342:"5b01a645",9348:"0ca7b406",9353:"b70c40d3",9355:"d5484ed9",9392:"ab39e009",9398:"eab4b0fe",9405:"344dca25",9461:"47016385",9499:"2e86cab0",9514:"1be78505",9528:"700fba64",9546:"a1e09814",9566:"e1a3fa89",9572:"7a1dcf5b",9589:"95a8df98",9590:"88c087fa",9605:"81fab8ae",9627:"eaf6f8ac",9636:"0776bfb3",9677:"67747c0c",9688:"e368a9fb",9743:"0fdc98ff",9769:"f4457846",9774:"4
2f12bbb",9783:"11166ec1",9884:"92dce1fb",9924:"df203c0f",9933:"16ae7048",9948:"531549fe",9966:"c9c86724",9993:"0566ee4e"}[e]||e)+"."+{4:"4e9a5cf9",37:"d0c0ee9a",46:"ee8a0396",53:"db3fa784",61:"a77abad9",87:"615b8dd3",116:"d88674bc",134:"1484cdb7",141:"9fc44a3a",149:"b0d3139a",153:"b83a2475",221:"bb3028d5",247:"5db2e7c5",342:"4b622c56",370:"4f5184c5",409:"e05c44db",457:"d201e113",459:"17ea8335",464:"c3e3948c",486:"e1f777fb",514:"116764bd",550:"263f2266",559:"e300b666",637:"04767ddc",728:"ff89c437",763:"9eb891ab",779:"c4903e22",793:"7ec39821",821:"50e8647c",863:"b839bc68",911:"34bfcbb8",913:"f1c452f7",966:"c6c7a27a",1014:"0a1bf1be",1037:"868865cd",1056:"2e85c605",1059:"8316bb33",1065:"8346d600",1067:"c13c8d82",1084:"40a14955",1135:"c4794792",1244:"d17722f8",1261:"200b36b8",1280:"3459eb36",1284:"ac9d7105",1298:"e1932ddf",1309:"cbbddffb",1317:"8fc6a14e",1321:"8677b0e2",1324:"2eeb6054",1348:"852747df",1356:"58bab6f5",1358:"389df322",1367:"4f52e2c1",1380:"7a3f0c91",1381:"46751a68",1389:"abe02502",1396:"304f611b",1506:"86d13f08",1567:"056a69b4",1595:"4aec0d98",1601:"4739d856",1617:"cab302d2",1629:"e89bf41c",1638:"21a32d60",1672:"670bff35",1698:"e8e5f480",1701:"496e2f5c",1727:"aa5304bb",1735:"df84af31",1743:"86f5d431",1768:"7d6c1213",1819:"cee12f16",1849:"db81de9d",1906:"9f28cc2a",1924:"c577c0d9",1966:"0cbc9668",2066:"c3b23be6",2115:"a58d0dbc",2124:"f14034a8",2163:"3e1e62ca",2201:"8d148709",2206:"84fb8e56",2218:"beff1ed1",2250:"d9da0a6b",2295:"4ab6eed6",2302:"0e0d5e90",2341:"45273b48",2376:"990533ff",2399:"931b3631",2421:"20c32a23",2451:"5c5ad60e",2525:"b4127711",2536:"e3dc6a03",2551:"dd1b8c40",2600:"479a6b44",2626:"fcfd7367",2649:"5ef379ab",2659:"22bd81ef",2684:"4864a6cb",2801:"2f86505e",2841:"94eb25b0",2898:"8bef115d",2914:"01807acf",2925:"935697c2",2930:"a85eac6d",2970:"5655a6d5",3007:"24045c19",3015:"73c7f255",3019:"b2e60a70",3104:"a3e63893",3106:"00ea1bab",3108:"178fcfeb",3114:"4b111030",3156:"4ad87429",3165:"cc1265f7",3171:"c7342b03",3186:"24413443",3237:"4b1dff54",3294:"9f25e2b2",3334:"9d00ae76",3335:"25e17ff8",3375:"e5853741",3451:"480ac28c",3459:"3eb76891",3527:"58706e60",3551:"d0fc83fd",3569:"ec75ea31",3575:"cd328d6b",3610:"6fcaf4e3",3614:"73cf28ce",3659:"9829f217",3721:"6e0779b7",3723:"278993e4",3743:"1a035772",3751:"f2161fcd",3752:"7c3fe38f",3762:"d43618bf",3789:"04b45f76",3790:"0d4de16f",3814:"00465c0b",3820:"516e7aad",3878:"06a383c6",3891:"f72a5735",3892:"ada90aed",3901:"834725f1",3941:"2712e828",3984:"08f8682b",4001:"a0934960",4015:"6b26d853",4074:"dc1e8ab4",4089:"bcf41329",4092:"9986218e",4102:"86396385",4121:"cd5b5b3d",4139:"382a2477",4190:"17ac9ef1",4195:"98eaf8b4",4226:"21a1a5a7",4230:"29273046",4261:"258dc27f",4275:"97950d5a",4328:"3c7fe011",4361:"63327a55",4479:"2fdf126a",4497:"4f86a8cd",4506:"0b055596",4518:"e078b181",4597:"3da8bb1f",4641:"9361363c",4670:"fd197a31",4698:"33cac92c",4705:"72a06f20",4717:"68813286",4788:"c461f876",4802:"d1361500",4803:"4b77a207",4821:"a593b19a",4845:"07da2c57",4850:"5f022655",4931:"138d3cc2",4936:"f4446c95",4949:"0ec33d3f",4968:"7677aed5",4974:"6744cb79",4985:"f6322072",5037:"a5fbff25",5044:"8b6ab0e9",5047:"742cff2c",5050:"ee4d34a2",5100:"ae4efdb7",5114:"53f7f6ed",5146:"ce7a1a3f",5188:"fe5f1b68",5201:"5cd52e38",5290:"5242cdc1",5307:"cd754b0f",5312:"9fc5d72f",5426:"c006e06c",5439:"e6d75562",5492:"23491c44",5500:"ad106193",5516:"68bc200c",5550:"3c942df0",5599:"42ce2c0e",5615:"7c22b5ad",5653:"fffdcf8c",5658:"4209df99",5667:"5f621236",5683:"88e72369",5745:"1eec78d1",5784:"33269912",5805:"dcfadf8a",5815:"89e39bb2",5853:"3f8abe98",5867:"655806ca",5870:
"798f6c81",5928:"e978628a",5937:"e78269be",5979:"333d8b8c",5980:"226bb5e1",5992:"41011fdc",5998:"ac6136bc",6023:"a1506d52",6045:"6ce347a1",6047:"ec3a843a",6060:"ae10ed8b",6066:"c78f7afc",6082:"27d28384",6106:"eebe35b5",6123:"fa5f0eeb",6154:"4acd61bf",6170:"91212703",6202:"a38d821d",6205:"e3131514",6216:"5ebdd8a0",6221:"a26cfee2",6347:"15eb26a9",6365:"42453980",6425:"572fcc59",6470:"710da58a",6487:"ba9eafae",6502:"a648e4e7",6517:"bd2d99fa",6553:"4e8a8e33",6584:"df33141b",6593:"4a5371fc",6615:"e13825b5",6657:"4bdbf84c",6667:"2fea1bac",6709:"ef6cdf69",6754:"42bf3974",6779:"8f108577",6835:"ef10f950",6919:"7696289d",6946:"359023f6",6972:"79cff386",6973:"1c7a3e5e",6980:"d1430428",7005:"f5ef9cde",7040:"42a1439a",7119:"cdc5bae9",7128:"1827468e",7269:"795207ca",7271:"e87f977e",7292:"440c7a8f",7338:"fbfcfbdd",7367:"d38a5a68",7400:"f86e26cd",7422:"d9cc4236",7428:"fae137e5",7459:"893f6d83",7466:"392273b9",7470:"90217184",7502:"e8cba6bf",7530:"f13da92a",7599:"e93defab",7603:"ab851359",7607:"cab935d5",7615:"3dbe520b",7642:"a614429f",7644:"f500f40c",7676:"e503482c",7678:"41ad8dbf",7682:"9efffa84",7719:"57721fbf",7729:"2dadf504",7733:"39e1db79",7809:"be4fcdc4",7811:"02da5a1f",7813:"c7338115",7826:"d6831948",7846:"5be02699",7880:"95b22506",7894:"aeeda7b1",7903:"ff68a5db",7918:"15777b09",7919:"8f492131",7982:"beab6265",7992:"9d60d6f4",8003:"06a3b010",8006:"96902377",8087:"c68bc9d2",8145:"3ceec6e0",8252:"6b5bfb43",8255:"31ef19d5",8314:"80673733",8338:"e0cb42b2",8345:"194edf9f",8359:"de2d826f",8361:"a248ce8b",8403:"e3b9b680",8413:"f2efb99f",8440:"2bc1d91e",8443:"7e816643",8454:"a5c12fe6",8464:"294ce184",8468:"efa6971b",8477:"621fe574",8520:"d9580897",8559:"200c71a0",8560:"cfa9b65b",8562:"ee22c58f",8563:"d71587d0",8643:"600d2692",8681:"4ed3477a",8712:"83f2098b",8737:"f5a2f40b",8760:"bcd65bbf",8849:"a79209de",8851:"9f30f3bd",8855:"34338e5b",8879:"4a107983",8922:"eb31162e",8924:"f9c7caed",8950:"b4dc5bc4",8962:"5e8cc09c",8969:"e08386b2",8986:"fafd6e9b",8997:"f9098a98",9008:"3a3236cf",9012:"dddefdb9",9023:"ed20f1c0",9032:"626fbd8d",9045:"f1f2e0a7",9047:"c8a1eef8",9074:"c4d79b00",9083:"0053268a",9103:"70308f71",9156:"b017c60f",9184:"6591c182",9190:"6e82c14f",9231:"ae5d31f2",9290:"c3097b34",9342:"101a6b44",9348:"919c432c",9353:"af0421a4",9355:"9dab3496",9392:"2ce7bd64",9398:"09b1848e",9405:"ad2fb4fa",9461:"a14ffebf",9499:"10c33b72",9514:"550fc046",9528:"10e72a97",9546:"85e3130f",9566:"0bf155e1",9572:"bac2a656",9589:"9203f42d",9590:"627f10c1",9605:"afd8e52c",9627:"e4163fc8",9636:"281dcb57",9677:"3b504a1c",9688:"878c8e9a",9743:"aad68954",9769:"2ac0d29b",9774:"f24c9567",9783:"4fa12fee",9884:"4dff70b9",9924:"5f4bb7b3",9933:"4f71a059",9948:"9af8a21d",9966:"db1d990c",9993:"9efb00bc"}[e]+".js",t.miniCssF=e=>{},t.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),t.o=(e,c)=>Object.prototype.hasOwnProperty.call(e,c),b={},a="redis-developer-hub:",t.l=(e,c,d,f)=>{if(b[e])b[e].push(c);else{var r,o;if(void 0!==d)for(var n=document.getElementsByTagName("script"),i=0;i{r.onerror=r.onload=null,clearTimeout(s);var a=b[e];if(delete b[e],r.parentNode&&r.parentNode.removeChild(r),a&&a.forEach((e=>e(d))),c)return c(d)},s=setTimeout(l.bind(null,void 0,{type:"timeout",target:r}),12e4);r.onerror=l.bind(null,r.onerror),r.onload=l.bind(null,r.onload),o&&document.head.appendChild(r)}},t.r=e=>{"undefined"!=typeof 
Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},t.p="/",t.gca=function(e){return e={13155904:"6754",17896441:"7918",19987802:"6154",47016385:"9461",64867942:"4597",71898787:"3551",78895142:"7919",93148877:"457",94686630:"2925","642fde82":"4","4504686e":"37","69c2ba4e":"46","935f2afb":"53","1900cdc0":"61","1dc33e9c":"87",f88d2135:"116","9d90cc60":"134","185a5f78":"141","1e115aba":"149",e133680b:"153","9eeb107e":"221","2f1cd5d9":"247",d661e85d:"342","085ffbd7":"370","1cf06b0d":"409",c1ab566a:"459","1ef24e58":"464","90f408d1":"514",bf9dc990:"550",bbaff741:"559",fa8b3f88:"637","963a2101":"728","3483a674":"763","732072df":"779","910d84be":"793","45db1732":"821",dfeb6cfd:"863","2bc54e2b":"911","73e252ed":"913","54399a42":"966","3c066edc":"1014","886ca5ea":"1037",c32ed07a:"1056","64f3f413":"1059","335cbbbb":"1065",cefa0e41:"1067","0c61a2aa":"1084","4cfa8051":"1135","6f05ff58":"1244","3cbbe40c":"1261","710646cd":"1280",b51fdc8c:"1284","5c1a86d3":"1298","872db8be":"1309","098330c0":"1317","7ea171e5":"1321","0b9646e8":"1324",db1d58d4:"1348","899fdd9f":"1356","49623f30":"1358","85d526b8":"1367","21bf2b11":"1380","37c04a16":"1381","5d14e41e":"1389",c3151dc9:"1396","7460fc1d":"1506","51a7d035":"1567","5c3c7026":"1595","55ccde6e":"1601","96c61e5d":"1617","99cb7080":"1629","4843ea13":"1638","2e56b509":"1672","4141cbdd":"1698",e7e99a29:"1701","7f1e28a5":"1727","00d7f298":"1735",c65a5e23:"1743","99eb9860":"1768","81e8711f":"1819",f5985471:"1849","2c06bb79":"1906",c2cefeac:"1924",d3f14484:"1966","97a92876":"2066",f8bde386:"2115",e14e08fc:"2124","0545e01d":"2163","65a1cf65":"2201",d888d60e:"2206",fd0bff62:"2218",f242a466:"2250","31ead50b":"2295","3bb72ea1":"2302","9daaef4f":"2341","7d0548cb":"2376","70a5ec54":"2399","169d51e4":"2421","8632c9a0":"2451","163f4d81":"2525",c98631ab:"2536","6b968562":"2551",c8a0be4d:"2600","099d618a":"2626",f8c0f22b:"2649","730e35c4":"2659",ad08431a:"2684","470c62dd":"2801",e7fe152c:"2841",e1816353:"2898",feb7b0b4:"2914",b949b843:"2930",df84b43c:"2970",af72bfd1:"3007","129ce714":"3015","8083ca96":"3019","4d561b09":"3104",b0c5f580:"3106","2c83c4e0":"3108","552ba15d":"3114","9d845a85":"3156",b88966c9:"3165",d119b4b9:"3171","6b006b96":"3186","1df93b7f":"3237","0c264ecc":"3294","3962b4b6":"3334",f19dd2c1:"3335",fb01d865:"3375","6410f0dc":"3451","7654669a":"3459",b4c6035f:"3527","74ac41dd":"3569",dce58c9e:"3575","584963dc":"3610","994c0519":"3659",aa02f6e6:"3721",e1c2ddaa:"3723",e9b9d3de:"3743","3720c009":"3751",c1e8360a:"3752","2d271c04":"3762",f1811081:"3789","98d604d5":"3790",b8f4daa2:"3814",f23cc535:"3820",c551111a:"3878",e1d45d33:"3891","81b2d599":"3892","265284fc":"3901",fe9a220a:"3984","45c1b0fa":"4001",d329623b:"4015","0ec1cc91":"4074",ebafc251:"4089","0c9e8015":"4092",bbc4d579:"4102","55960ee5":"4121","076ede8c":"4139",aa630107:"4190","14612fe5":"4195","1ecc9877":"4226",e65f8228:"4230",db91b37d:"4261","329b0232":"4275",ba6f6003:"4328",e1610694:"4361",b3b34ca6:"4479","5c25e952":"4497","879b2dca":"4506","6e373ae3":"4518","1d1b3f81":"4641",a9a3a51c:"4670","29eff2aa":"4698","106fc9f0":"4705","932ef172":"4717","2cc2cea1":"4788","9c28af5e":"4802","20ee7761":"4803",a8be125d:"4821",d63023de:"4845",c6304617:"4850","7f823891":"4931","6c272831":"4936",fb3c2ee3:"4949","47f30d1f":"4968","6980e26f":"4974","34bbed6c":"4985",e7f62945:"5037","9f69dad8":"5044","1ff98f28":"5047",e8e1f04a:"5050",a5b41932:"5100","4c603040":"5114","28b9548b":"5146",b061d76e:"5188","31350f16":"5201",b400c9cd:"5290",b3fefca0:"5
307","1341f4ef":"5312","51ede774":"5426",f57bd6c4:"5439",ada40f98:"5492",c414f47b:"5500","18edca16":"5516",fba0c1ab:"5550","5a6c6630":"5599","23200c1b":"5615","03506951":"5653","3d95878d":"5658","1ab3d3f0":"5667",a5c3d9e9:"5683",d866cdfa:"5745","110bbf9d":"5784",d3555a77:"5805","165403c2":"5815",b0c8cd2e:"5853",f826be22:"5867","4b53257e":"5870",a3dbe69d:"5928","9292d398":"5937","46c09b1c":"5979",c1bb9856:"5980",f7773721:"5992",b2c8b850:"5998",eed077d2:"6023",d7f02a39:"6045","12853b3b":"6047","4be55f35":"6060",ccdc297e:"6082",eb3f2693:"6106",c554cc93:"6123","9a2b95c5":"6170","862d5cd0":"6202","1282e92b":"6205","6c249fdd":"6216","434536ad":"6221","16138ba5":"6347","2d198a95":"6365","18050ae3":"6425",c5c6e5e3:"6470",eaf8982a:"6487","13ca8dc8":"6502","84711cce":"6517","2601a326":"6553",e83211e5:"6584",a3302592:"6593","50b6ea97":"6615","965be803":"6657","0c306a96":"6667","2123ba4c":"6709","4c591bd5":"6779","9e5bb348":"6835","8ec5e180":"6919","6b6fd196":"6946","07a6fe21":"6972","214a573e":"6973",e49a9233:"6980","4e18a229":"7005","531e7017":"7040",fd375cb3:"7119",eb92c419:"7128","9f54a902":"7269","65be7fcc":"7271","8fea5263":"7292","613fe82a":"7338",aa68f0e9:"7367","4c958ddc":"7400","44556e56":"7422",e2919432:"7428","5b944e4b":"7459","46b3dd76":"7466","5450f727":"7470","3b0e0d3a":"7502",d9fa573c:"7530",cd548de4:"7599","1cc9ce96":"7603","1756b4ab":"7607",bfe77eb6:"7615","90e92e2d":"7642",bc14dfd3:"7644",ba498387:"7676",d9a0a71f:"7678",fd091685:"7682","2b69ed49":"7719",ddd418a4:"7729","16a31933":"7733","2cac92cf":"7809","8d1d1c2e":"7811","8632df87":"7813","7a460b64":"7826",d439d214:"7846",bda60f59:"7880",ed35ad37:"7894","59bac03b":"7903","8541537e":"7982","0abd8cd0":"7992","8460a6e0":"8003","53b913eb":"8006","405b4698":"8087","83a556d4":"8145",d69fc27c:"8252",dc17824a:"8255","18719bcf":"8314","223019eb":"8338","82b6bb4d":"8345","3b168288":"8359","0f0e7497":"8361","2b53ff98":"8403",c978831f:"8413",a33ab7b5:"8440","806d421f":"8443",f9ed4a2e:"8454",b59188bc:"8464","24585ef6":"8468",f251a97b:"8477","0edeee5a":"8520",a69121a9:"8559",f7282e3d:"8560",c42ebdb1:"8562",b697db3c:"8563",f2d3ddf1:"8643","9a2aa159":"8681",c3e7abab:"8712","412a9523":"8737","389b810a":"8760",c34f2f42:"8849","5ef34981":"8851",b67b88ed:"8855","3d36e868":"8879","0b7ca231":"8922",cc1ddd8c:"8924",ba3bf6d5:"8950",cb3ee170:"8962","5420bc6f":"8969",a55e4082:"8986","4c484ee6":"8997",ab704c68:"9008",f3dbe95e:"9012","651422d5":"9023","27dc3412":"9032","94f8b490":"9045","84ad0742":"9047","4393e4bc":"9074","844f5f7b":"9083",b6e690a8:"9103","70771f47":"9156","9918870c":"9184","9d037edf":"9190","4458e1ed":"9231",a6e0ba43:"9290","5b01a645":"9342","0ca7b406":"9348",b70c40d3:"9353",d5484ed9:"9355",ab39e009:"9392",eab4b0fe:"9398","344dca25":"9405","2e86cab0":"9499","1be78505":"9514","700fba64":"9528",a1e09814:"9546",e1a3fa89:"9566","7a1dcf5b":"9572","95a8df98":"9589","88c087fa":"9590","81fab8ae":"9605",eaf6f8ac:"9627","0776bfb3":"9636","67747c0c":"9677",e368a9fb:"9688","0fdc98ff":"9743",f4457846:"9769","42f12bbb":"9774","11166ec1":"9783","92dce1fb":"9884",df203c0f:"9924","16ae7048":"9933","531549fe":"9948",c9c86724:"9966","0566ee4e":"9993"}[e]||e,t.p+t.u(e)},(()=>{var e={1303:0,532:0};t.f.j=(c,d)=>{var b=t.o(e,c)?e[c]:void 0;if(0!==b)if(b)d.push(b[2]);else if(/^(1303|532)$/.test(c))e[c]=0;else{var a=new Promise(((d,a)=>b=e[c]=[d,a]));d.push(b[2]=a);var f=t.p+t.u(c),r=new Error;t.l(f,(d=>{if(t.o(e,c)&&(0!==(b=e[c])&&(e[c]=void 0),b)){var a=d&&("load"===d.type?"missing":d.type),f=d&&d.target&&d.target.src;r.message="Loading chunk "+c+" 
failed.\n("+a+": "+f+")",r.name="ChunkLoadError",r.type=a,r.request=f,b[1](r)}}),"chunk-"+c,c)}},t.O.j=c=>0===e[c];var c=(c,d)=>{var b,a,f=d[0],r=d[1],o=d[2],n=0;if(f.some((c=>0!==e[c]))){for(b in r)t.o(r,b)&&(t.m[b]=r[b]);if(o)var i=o(t)}for(c&&c(d);n Redis Discord Server | The Home of Redis Developers - + @@ -12,7 +12,7 @@

Redis Discord Server

The Redis Discord server is a place where you can learn, share, and collaborate about anything and everything Redis.

  • Connect with users from the community, Redis University, and Redis Inc.
  • Get your questions answered and learn cool new tips and tricks.
  • Watch for notifications of new content from Redis and the community.
  • Share your knowledge and expertise with others.

How to join

If you already have a Discord account, joining is easy. Just go to https://discord.gg/redis and you're in.

If you don't have a Discord account, you'll need to create one. Once you have an account, you can download Discord for your desktop, get the mobile app, or just use it in a browser.

Server rules

Once you are on the server, you should be aware of our rules:

  • Be respectful.
  • Sending and linking harmful material will result in an immediate and permanent ban.
  • No shady links (e.g. to .jar files, or .exe files).
  • DO NOT spam. Also, DO NOT spam.
  • DO NOT use vulgar or explicit language—keep it family friendly.
  • DO NOT claim authorship of assets that are not yours. Plagiarism will result in a ban.
  • DO share your projects and ideas, but DO NOT advertise or solicit in the chat.
  • DO NOT ping @here or @everyone and DO NOT spam your messages to multiple channels.
  • DO NOT DM the admins or send us friend requests unless we invite you to do so.
  • Abide by the Redis Community Guidelines & Code of Conduct.

How to get help

Got a question you want answered? Got a problem you can’t figure out? Don’t worry. Happens to the best of us. We have a help system inspired by the one used by the most excellent Python Discord.

tl;dr

Ask a question in a channel in ❓ Help: Available. That channel moves to ✋ Help: In Use. Converse, discuss, get an answer. Close the channel with /close when you’re done and it moves to 💤 Help: Dormant.

Full Version

So, we built a help system to make it easier to ask and answer. There are a series of help channels on the server that all start with help- followed by a letter in the NATO Phonetic Alphabet (you know, the whole alfa bravo charlie thing). There are 26 channels grouped into Help: Available, Help: In Use, and Help: Dormant.

❓ Help: Available contains channels that are available for a question. If you have a question, you can claim one of these channels simply by posting it there. The channel will immediately be moved to Help: In Use.

✋ Help: In Use has channels that are being used to answer a question. If you have the answer to a fellow user’s question, post it here. If you asked the question, other users may answer it. If someone has answered your question—or perhaps you answer it yourself—you can close the channel by typing /close. Channels will automatically close if they are inactive for 48 hours. Closed channels are moved to 💤 Help: Dormant.

💤 Help: Dormant is for channels that are not currently in use. You can read them—useful as there are lots of answered questions here—but you cannot post to them. When someone claims a channel in Help: Available, a random channel is selected from Help: Dormant to replace it.

- + \ No newline at end of file diff --git a/community/index.html b/community/index.html index 91fd5ffa74..616bace800 100644 --- a/community/index.html +++ b/community/index.html @@ -4,7 +4,7 @@ Get Involved with Redis Community | The Home of Redis Developers - + @@ -12,7 +12,7 @@

Get Involved with Redis Community

Need help with Redis? Do you want to share something cool? Want to learn more about Redis? Check out some of the great community resources at your disposal:

Join the Redis Discord to get help and share your knowledge
Watch live Redis content on Redis Live
- + \ No newline at end of file diff --git a/create/aws/analytics-using-aws/index.html b/create/aws/analytics-using-aws/index.html index f14eb2ebdf..984f707fe1 100644 --- a/create/aws/analytics-using-aws/index.html +++ b/create/aws/analytics-using-aws/index.html @@ -4,7 +4,7 @@ How to Build and Deploy Your Own Analytics Dashboard using NodeJS and Redis on the AWS Platform | The Home of Redis Developers - + @@ -13,7 +13,7 @@

How to Build and Deploy Your Own Analytics Dashboard using NodeJS and Redis on the AWS Platform


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

An interactive analytics dashboard serves several purposes. It lets you share data and gives you the vital information you need to make game-changing decisions at a faster pace. Building a real-time dynamic dashboard using a traditional relational database might require a complex set of queries. By using a NoSQL database like Redis, you can build a powerful interactive and dynamic dashboard with a small number of Redis commands.

Let’s take a look at how this was achieved.

  • What will you build?
  • What will you need?
  • Getting started
  • How does it work?
  • How data is stored
  • Navigating the application

What will you build?

You’ll build an analytics dashboard app in NodeJS (JavaScript) that uses Redis bitmaps, and then deploy it to AWS.

Ready to get started? Ok, let’s dive straight in.

What will you need?

  • NodeJS: used as an open-source, cross-platform, backend JavaScript runtime environment that executes JavaScript code outside a web browser.
  • Redis Enterprise Cloud: used as a real-time database, cache, and message broker.
  • NPM: used as a package manager. It allows you to build node apps.

Getting Started

Prepare the environment

  • Install Node - v12.19.0
  • Install NPM - v6.14.8

Step 1. Sign up for a Free Redis Enterprise Cloud Account

Follow this tutorial to sign up for a free Redis Enterprise Cloud account.

image

Choose AWS as the cloud vendor while creating your new subscription. At the end of the database creation process, you will get a Redis Enterprise Cloud database endpoint and password. Save them for later use.

image

Step 2. Clone the repository

 git clone https://github.com/redis-developer/basic-analytics-dashboard-redis-bitmaps-nodejs

Step 3. Set up a backend environment

First, we will set up the environment variables.

Go to the /server folder (cd ./server) and then execute the command below:

 cp .env.example .env

Open the .env file and add your Redis Enterprise Cloud database endpoint URL, port, and password as shown below:


PORT=3000

# Host and port. These can be given with `redis://` or without.
# A host and port encoded in the Redis URI take precedence over the other environment variables.
# This option is preferable.
REDIS_ENDPOINT_URI=redis://redis-XXXX.c212.ap-south-1-1.ec2.cloud.redislabs.com:15564

# Or you can set it here (ie. for docker development)
REDIS_HOST=redis-XXXX.c212.ap-south-1-1.ec2.cloud.redislabs.com
REDIS_PORT=XXXX

# You can set password here
REDIS_PASSWORD=reXXX

COMPOSE_PROJECT_NAME=redis-analytics-bitmaps
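For reference, a NodeJS backend might consume these variables roughly as follows. This is a minimal sketch assuming the node-redis v4 client; the repository's actual wiring may differ:

 // db.js - a minimal sketch (node-redis v4 assumed; the repository may differ)
 const { createClient } = require('redis');

 // The redis:// URI takes precedence over the separate host/port variables
 const client = process.env.REDIS_ENDPOINT_URI
   ? createClient({
       url: process.env.REDIS_ENDPOINT_URI,
       password: process.env.REDIS_PASSWORD,
     })
   : createClient({
       socket: {
         host: process.env.REDIS_HOST,
         port: Number(process.env.REDIS_PORT),
       },
       password: process.env.REDIS_PASSWORD,
     });

 client.on('error', (err) => console.error('Redis connection error', err));

 module.exports = client; // call client.connect() once at startup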

Step 4. Install dependencies

 npm install

Step 5. Run the backend

 npm run dev

Step 6. Set up the frontend environment

Go to the client folder (cd ./client) and then:

 cp .env.example .env

Set the VUE_APP_API_URL parameter to the exact URL and port of your backend, as shown below:

VUE_APP_API_URL=http://localhost:3000

Step 7. Install dependencies

 npm install

Step 8. Run the frontend

 npm run serve

analytics

How does it work?

How the data is stored:

The event data is stored in various keys and data types, which are discussed below:

For each of these time spans:
  • year: like 2021
  • month: like 2021-03 (means March of 2021)
  • day: like 2021-03-03 (means 3rd March of 2021)
  • weekOfMonth: like 2021-03/4 (means 4th week of March 2021)
  • anytime
For each of these scopes:
  • source
  • action
  • source + action
  • action + page
  • userId + action
  • global
For each of these data types:
  • count (Integer stored as String)
  • bitmap
  • set

It generates keys with the following naming convention:

 rab:{type}[:custom:{customName}][:user:{userId}][:source:{source}][:action:{action}][:page:{page}]:timeSpan:{timeSpan}

where values in [] are optional.
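To make the convention concrete, here is an illustrative helper; the function name and argument shape are assumptions, not code from the repository:

 // Illustrative builder for rab:{type}[...]:timeSpan:{timeSpan} keys
 function buildKey({ type, customName, userId, source, action, page, timeSpan }) {
   let key = `rab:${type}`;
   if (customName) key += `:custom:${customName}`;
   if (userId !== undefined) key += `:user:${userId}`;
   if (source) key += `:source:${source}`;
   if (action) key += `:action:${action}`;
   if (page) key += `:page:${page}`;
   return `${key}:timeSpan:${timeSpan}`;
 }

 // buildKey({ type: 'count', action: 'addToCart', timeSpan: '2015-12/3' })
 // => 'rab:count:action:addToCart:timeSpan:2015-12/3'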

For each generated key like rab:count:*, data is stored like: INCR {key}

Example:
 INCR rab:count:action:addToCart:timeSpan:2015-12/3

For each generated key like rab:set:*, data is stored like:

 SADD {key} {userId}
Example:
 SADD rab:set:action:addToCart:timeSpan:2015-12/3 8

For each generated key like rab:bitmap:*, data is stored like:

 SETBIT {key} {userId} 1
Example:
 SETBIT rab:bitmap:action:addToCart:timeSpan:2015-12/3 8 1
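Putting the three data types together, a single tracked event might be recorded like this (a sketch assuming node-redis v4; the helper name is illustrative):

 // Record one addToCart event for a user, for one time span
 async function trackAddToCart(client, userId, timeSpan) {
   await client
     .multi()
     .incr(`rab:count:action:addToCart:timeSpan:${timeSpan}`) // count
     .sAdd(`rab:set:action:addToCart:timeSpan:${timeSpan}`, String(userId)) // set
     .setBit(`rab:bitmap:action:addToCart:timeSpan:${timeSpan}`, userId, 1) // bitmap
     .exec();
 }
 // In practice this would run once per time span (year, month, day, weekOfMonth, anytime).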

Cohort data

  • We store users who registered and then bought some products (the order of actions matters).
  • For each buy action in December, we first check whether the user has performed a register action (the register counter must be greater than zero).
  • If so, we set the user's bit to 1:
Example
  SETBIT rab:bitmap:custom:cohort-buy:timeSpan:{timeSpan} {userId} 1
  • Example - User ID 2 bought 2 products on 2015-12-17; this won't be stored.
  • Example - User ID 10 bought 1 product on 2015-12-17 and registered on 2015-12-16, so it will be stored like:
 SETBIT rab:bitmap:custom:cohort-buy:timeSpan:2015-12 10 1
  • We assume that a user cannot buy without registering first.
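In code, that check might look as follows. This is a sketch: the per-user register counter key follows the naming convention above, but it is an assumption:

 // On each buy: set the cohort bit only if the user has registered before
 async function trackCohortBuy(client, userId, timeSpan) {
   const registerCount = await client.get(
     `rab:count:user:${userId}:action:register:timeSpan:anytime` // assumed counter key
   );
   if (Number(registerCount) > 0) {
     await client.setBit(`rab:bitmap:custom:cohort-buy:timeSpan:${timeSpan}`, userId, 1);
   }
 }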

Retention data

  • Retention means users who bought on two different dates.

  • For each buy action, we check whether the user has bought more products anytime (in total) than on that particular day (the current purchase is not included in either count).

  • If so, we add the user ID to a set like:

     SADD rab:set:custom:retention-buy:timeSpan:{timeSpan} {userId}
  • Example - User ID 5 bought 3 products on 2015-12-15. Their retention won't be stored (products bought earlier that day: 2, products bought on other days: 0).

  • Example - User ID 3 bought 1 product on 2015-12-15 and, before that, 1 product on 2015-12-13. Their retention will be stored (products bought earlier that day: 0, products bought on other days: 1) like:

 SADD rab:set:custom:retention-buy:timeSpan:2015-12 3
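Expressed as code, the retention check could look like this. Again a sketch: the per-user buy counter keys are assumptions, and both counters are read before the current purchase is recorded:

 // On each buy: store retention if the user has also bought on some other day
 async function trackRetentionBuy(client, userId, day, timeSpan) {
   // e.g. day = '2015-12-15', timeSpan = '2015-12'; counters are read before
   // this purchase is recorded, so the current purchase is not included
   const boughtThatDay = Number(
     await client.get(`rab:count:user:${userId}:action:buy:timeSpan:${day}`)
   );
   const boughtAnytime = Number(
     await client.get(`rab:count:user:${userId}:action:buy:timeSpan:anytime`)
   );
   if (boughtAnytime > boughtThatDay) {
     await client.sAdd(`rab:set:custom:retention-buy:timeSpan:${timeSpan}`, String(userId));
   }
 }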

How the data is accessed:

Total Traffic:

December:
  BITCOUNT rab:bitmap:custom:global:timeSpan:2015-12
X week of December:
  BITCOUNT rab:bitmap:custom:global:timeSpan:2015-12/{X}
Example:
 BITCOUNT rab:bitmap:custom:global:timeSpan:2015-12/3

Traffic per Page ({page} is one of: homepage, product1, product2, product3):

December:
 BITCOUNT rab:bitmap:action:visit:page:{page}:timeSpan:2015-12
Example:
 BITCOUNT rab:bitmap:action:visit:page:homepage:timeSpan:2015-12
X week of December:
 BITCOUNT rab:bitmap:action:visit:page:{page}:timeSpan:2015-12/{X}
Example:
 BITCOUNT rab:bitmap:action:visit:page:product1:timeSpan:2015-12/2

Traffic per Source ({source} is one of: Google, Facebook, email, direct, referral, none):

December:
 BITCOUNT rab:bitmap:source:{source}:timeSpan:2015-12
Example:
 BITCOUNT rab:bitmap:source:referral:timeSpan:2015-12
X week of December:
 BITCOUNT rab:bitmap:source:{source}:timeSpan:2015-12/{X}
Example:
 BITCOUNT rab:bitmap:source:google:timeSpan:2015-12/1
Trend traffic ({page} is one of: homepage, product1, product2, product3):
December:

From

 BITCOUNT rab:bitmap:action:visit:page:{page}:timeSpan:2015-12-01

to

 BITCOUNT rab:bitmap:action:visit:page:{page}:timeSpan:2015-12-31
  • 1st Week of December: same as above, but from 2015-12-01 to 2015-12-07
  • 2nd Week of December: same as above, but from 2015-12-08 to 2015-12-14
  • 3rd Week of December: same as above, but from 2015-12-15 to 2015-12-21
  • 4th Week of December: same as above, but from 2015-12-22 to 2015-12-28
  • 5th Week of December: same as above, but from 2015-12-29 to 2015-12-31
Example:
 BITCOUNT rab:bitmap:action:visit:page:homepage:timeSpan:2015-12-29 => BITCOUNT rab:bitmap:action:visit:page:homepage:timeSpan:2015-12-30 => BITCOUNT rab:bitmap:action:visit:page:homepage:timeSpan:2015-12-31
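Since each day has its own key, the trend is assembled by iterating over the dates. A sketch, assuming node-redis v4:

 // Unique visitors per day for one page across December 2015
 async function trendTraffic(client, page) {
   const counts = [];
   for (let day = 1; day <= 31; day++) {
     const date = `2015-12-${String(day).padStart(2, '0')}`;
     counts.push(
       await client.bitCount(`rab:bitmap:action:visit:page:${page}:timeSpan:${date}`)
     );
   }
   return counts; // one unique-visitor count per day
 }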

Total products bought:

December:
 GET rab:count:action:buy:timeSpan:2015-12
X week of December:
 GET rab:count:action:buy:timeSpan:2015-12/{X}
Example:
 GET rab:count:action:buy:timeSpan:2015-12/1

Total products added to cart:

December:
 GET rab:count:action:addToCart:timeSpan:2015-12
X week of December:
 GET rab:count:action:addToCart:timeSpan:2015-12/{X}
Example:
 GET rab:count:action:addToCart:timeSpan:2015-12/1
Shares of products bought ({productPage} is one of: product1, product2, product3):

December:

 GET rab:count:action:buy:page:{productPage}:timeSpan:2015-12
Example:
 GET rab:count:action:buy:page:product3:timeSpan:2015-12
X week of December:
 GET rab:count:action:buy:page:{productPage}:timeSpan:2015-12/{X}
Example:
 GET rab:count:action:buy:page:product1:timeSpan:2015-12/2

Customer and Cohort Analysis

  • People who registered: BITCOUNT rab:bitmap:action:register:timeSpan:2015-12

  • People who register then bought (order matters): BITCOUNT rab:bitmap:custom:cohort-buy:timeSpan:2015-12

  • Dropoff: (People who register then bought / People who register) * 100 [%]

  • Customers who bought only the specified product ({productPage} is one of: product1, product2, product3):

     SMEMBERS rab:set:action:buy:page:{productPage}:timeSpan:2015-12
Example:
 SMEMBERS rab:set:action:buy:page:product2:timeSpan:2015-12

Customers who bought Product1 and Product2:

 SINTER rab:set:action:buy:page:product1:timeSpan:anytime rab:set:action:buy:page:product2:timeSpan:anytime

Customer Retention (customers who bought on different dates):

 SMEMBERS rab:set:custom:retention-buy:timeSpan:anytime
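For illustration, a backend could combine the queries in this section roughly like this (a sketch assuming node-redis v4; names are illustrative):

 // Assemble the customer and cohort analysis for December 2015
 async function customerAnalysis(client) {
   const registered = await client.bitCount(
     'rab:bitmap:action:register:timeSpan:2015-12'
   );
   const registeredThenBought = await client.bitCount(
     'rab:bitmap:custom:cohort-buy:timeSpan:2015-12'
   );
   // the Dropoff formula above, as a percentage
   const dropoff = registered ? (registeredThenBought / registered) * 100 : 0;
   const boughtProduct1And2 = await client.sInter([
     'rab:set:action:buy:page:product1:timeSpan:anytime',
     'rab:set:action:buy:page:product2:timeSpan:anytime',
   ]);
   const retained = await client.sMembers(
     'rab:set:custom:retention-buy:timeSpan:anytime'
   );
   return { registered, registeredThenBought, dropoff, boughtProduct1And2, retained };
 }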

References

- + \ No newline at end of file diff --git a/create/aws/bidding-on-aws/index.html b/create/aws/bidding-on-aws/index.html index be10718f9c..60aeaaa40f 100644 --- a/create/aws/bidding-on-aws/index.html +++ b/create/aws/bidding-on-aws/index.html @@ -4,7 +4,7 @@ How to Build a Real-Time Bidding Platform using NodeJS, AWS Lambda and Redis | The Home of Redis Developers - + @@ -12,7 +12,7 @@

How to Build a Real-Time Bidding Platform using NodeJS, AWS Lambda and Redis


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

Digital technology has propelled us forward to an exciting new era and has transformed almost every aspect of life. We’re more interconnected than ever as communication has become instant. Working from home has now become the norm, helping us pivot to a new way of working during the pandemic. And our ability to reduce carbon emissions by attending work-related events online has meant that we’ve taken greater strides to combat global warming. Continuing this trend is Shamshir Anees and his team, who have created an application that can host digital auctions. By using Redis, data transmission between components was carried out with maximum efficiency, providing users with real-time bidding updates on the dashboard.

Let’s take a look at how this was achieved. We’d also like to point out that we have a diverse range of exciting applications for you to check out on the Redis Launchpad.

  • What will you build?
  • What will you need?
  • Architecture
  • How does it work?
  • Getting started
  • How data is stored
  • Navigating the application

What will you build?

You’ll build an application that will allow users to attend and take part in digital auctions. The application will allow users to create an account, put in bids, and even set up their own auction. Below we’ll uncover the required components, their functionality, and how to deploy them within this architecture.

Ready to get started? Ok, let’s dive straight in.

What will you need?

  • NodeJS: used as an open-source, cross-platform, backend JavaScript runtime environment that executes JavaScript code outside a web browser.
  • Amazon Cognito: used to securely manage and synchronize app data for users on mobile.
  • Redis Enterprise Cloud: used as a real-time database, cache, and message broker.
  • Redis Stack: used to store, update and fetch JSON values from Redis.
  • Socket.IO: used as a library that provides real-time, bi-directional, and event-based communication between the browser and the server.
  • AWS Lambda: used as a serverless compute service that runs your code in response to events and manages the underlying compute resources automatically for you.
  • Amazon SNS/Amazon SES: a fully managed messaging service for both application-to-application (A2A) and application-to-person (A2P) communication.

Architecture

My Image

How does it work?

All auctions

NodeJS connects to the Redis Enterprise Cloud database.

The frontend then communicates with the NodeJS backend through API calls.

GET : /api/auctions fetches all the keys from the auctions hash.

NodeJS uses the Redis module to work with Redis Enterprise Cloud. The Redis client is created using the Redis credentials, and hmget(), the equivalent of the HMGET command, is used to fetch data from the Redis database.

Each auction

GET : /api/auctions/{auctionId} fetches each auction item by id.

NodeJS uses the Redis module to work with Redis Cloud. The Redis client is created using the Redis credentials, and hmget(), the equivalent of the HMGET command, is used to fetch data from the Redis database.

All bidding data of an auction item

GET : /api/bidding/{auctionId}

NodeJS uses the Redis module to work with Redis Cloud. The Redis client is created using the Redis credentials, and hmget(), the equivalent of the HMGET command, is used to fetch data from the Redis database.

Profile settings

GET : /api/settings

NodeJS uses the Redis module to work with Redis Cloud. The Redis client is created using the Redis credentials, and hmget(), the equivalent of the HMGET command, is used to fetch data from the Redis database.

User info

GET : /api/users/{email}

NodeJS uses the Redis module to work with Redis Cloud. The Redis client is created using the Redis credentials, and hmget(), the equivalent of the HMGET command, is used to fetch data from the Redis database.
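As an illustration of that pattern, a GET handler might look roughly like this. This is a sketch: the callback-style redis v3 client is assumed (matching the hmget()/hmset() calls described on this page), and the 'auctions' hash name and JSON-per-field layout are inferred from the data model described below, not taken from the repository:

 const { promisify } = require('util');
 const redis = require('redis');

 const client = redis.createClient({
   host: process.env.REDIS_END_POINT,
   port: process.env.REDIS_PORT,
   password: process.env.REDIS_PASSWORD,
 });
 const hmgetAsync = promisify(client.hmget).bind(client);

 // GET /api/auctions/{auctionId}: read one auction's JSON from the hash
 async function getAuction(auctionId) {
   const [json] = await hmgetAsync('auctions', auctionId); // assumed hash name
   return json ? JSON.parse(json) : null;
 }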

Getting started

Prerequisites

Step 1. Sign up for a Free Redis Enterprise Cloud Account

Follow this tutorial to sign up for a free Redis Enterprise Cloud account.

image

Choose AWS as the cloud vendor while creating your new subscription. At the end of the database creation process, you will get a Redis Enterprise Cloud database endpoint and password. Save them for later use.

image

Step 2: Clone the backend GitHub repository

https://github.com/redis-developer/NR-digital-auction-backend

Step 3. Install the package dependencies

npm install is an npm CLI command that installs the dependencies specified inside package.json:

npm install

Step 4. Setting up environment variables

export REDIS_END_POINT=XXXX
export REDIS_PASSWORD=XXX
export REDIS_PORT=XXX

Step 5. Building the application

npm run build

Step 6. Starting the application

npm start

Step 7. Cloning the frontend GitHub repository

git clone https://github.com/redis-developer/NR-digital-auction-frontend

Step 8. Building the application

npm run build

Step 9. Starting the application

npm start

Step 10. Accessing the application

accessing

Step 11. Signing up to the application

signing

Step 12. Sign-in

sign

Step 13. Accessing the dashboard

access

Step 14. Listing the auction item

Listing

Step 15. Accessing the bidding page

accessing

How data is stored

The Redis Enterprise Cloud database with Redis Stack is what you’ll use to store the data.

Auctions

  • Type - Redis Hash.
  • Used for storing auctions data.
  • A UUID generated by the backend (NodeJS) serves as the key
  • JSON data representing the Auction object serves as the value and includes the following keys
    • auctionId
    • auctionItemName
    • description
    • lotNo
    • quantity
    • buyersPremium
    • itemUnit
    • minBidAmount
    • bidIncrement
    • startDateTime
    • endDateTime
    • images
    • currentBid
  • NodeJS connects to the Redis Cloud database. The Frontend communicates with the NodeJS backend through API calls.
  • POST : /api/auctions.
  • The request body has JSON data to be inserted into the database.
  • NodeJS uses the Redis module to work with Redis Cloud. The Redis client is created using the Redis credentials, and hmset(), the equivalent of the HMSET command, is used to push data to the Redis database.

Biddings

  • Type - Redis Hash

  • Used for storing the bids placed on each auction item

  • NodeJS connects to the Redis Cloud database. The Frontend communicates with the NodeJS backend through API calls.

  • POST : /api/bidding

  • The request body has JSON data to be inserted into the database.

  • AuctionId from request body serves as the key

  • JSON data which includes keys:

    • currentBid
    • currentBidTime
    • currentBidEndTime, and
    • biddings array (id, auctionId, userId, username, bidAmount, bidTime)
  • The bidding array has all of the bids placed for a particular auction item.

  • Based on currentBidEndTime and currentBidTime, the auction end time is extended following the dynamic closing concept.

  • Current dynamic closing logic - if a new bid is placed within the last 5 minutes of the auction end time, the end time is extended by 1 hour (see the sketch after this list).

  • This will be configurable in the SaaS solution.

  • NodeJS uses the Redis module to work with Redis Cloud. The Redis client is created using the Redis credentials, and hmset(), the equivalent of the HMSET command, is used to push data to the Redis database.
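The dynamic closing rule above can be expressed in a few lines. In this sketch the 5-minute window and 1-hour extension mirror the description; the function name and millisecond timestamps are illustrative:

 // Extend the auction end time if a bid arrives within the last 5 minutes
 function nextBidEndTime(currentBidTimeMs, currentBidEndTimeMs) {
   const FIVE_MINUTES = 5 * 60 * 1000;
   const ONE_HOUR = 60 * 60 * 1000;
   if (currentBidEndTimeMs - currentBidTimeMs <= FIVE_MINUTES) {
     return currentBidEndTimeMs + ONE_HOUR; // dynamic closing kicks in
   }
   return currentBidEndTimeMs; // earlier bids leave the end time unchanged
 }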

Profile Settings

  • Type - string
  • JSON data, which includes the settings keys, serves as the value
  • NodeJS connects to the Redis Cloud database. The frontend communicates with the NodeJS backend through API calls.
  • POST : /api/settings
  • The request body has JSON data to be inserted into the database.
  • NodeJS uses the Redis module to work with Redis Cloud. The Redis client is created using the Redis credentials, and hmset(), the equivalent of the HMSET command, is used to push data to the Redis database.

Users

  • Type - Redis Hash
  • Used for storing user details
  • NodeJS connects to the Redis Cloud database. The Frontend communicates with the NodeJS backend through API calls.
  • POST : /api/users
  • The request body has JSON data to be inserted into the database
  • The email id serves as the key
  • JSON data containing the user's details serves as the value
  • NodeJS uses the Redis module to work with Redis Cloud. The Redis client is created using the Redis credentials, and hmset(), the equivalent of the HMSET command, is used to push data to the Redis database.

Creating an account

When you go onto the Digital Auction’s homepage, you’ll come across a range of items that are to be auctioned (see below). Click on the ‘Welcome’ button to create an account.

creating

You’ll then be taken to the sign-up page. Enter your details and click ‘sign-up.’ Once you’ve completed the sign-up form, you’ll receive a confirmation email to activate your account.

Placing a bid

Go to the homepage to view all of the items and their auction details. All of the data here is being populated by Redis Stack and Redis Cloud. Scroll through the page and click on the item that you want to place a bid for.

placing

When you click on an item, you’ll see the details for the bidding process at the top of the page. You’ll also have the option to set a reminder, which sends you an email when the bidding process for this item begins.

On the right-hand side of the image, you’ll see the highest bid that’s been placed for this item. Below that is a list of previous bids made by different users, updated in real time.

Click on the ‘Place Bid’ button to make a bid.

To access the meta-data information or view more images of the item, simply scroll down the page (see below).

placebid

Viewing your Bidding History

Click on ‘My biddings’ at the top of the navigation bar to view your bidding history (see below).

view

Viewing upcoming auctions

Click on ‘Auctions’ at the top of the navigation bar to view all upcoming auctions.

auction

Conclusion: Leveraging Redis and AWS to Empower Auctioneers with Real-time Data

Digital technology has had a ripple effect across all aspects of modern life. The ability to complete important tasks online instead of in person has revolutionized the way we live, helping us to reduce carbon emissions, save time on travel, and gain instant access to reams of data that we never had before.

However, the success of such events hinges on a database’s ability to transmit data in real-time. Any blips in transmission would create a disconnect between users and the auction, impeding auctioneers’ reactions to bids. This would only result in frustration, disengagement, and a complete divorce of users from the application.

But thanks to Redis, the components that made up the architecture system became vastly more interconnected so data was able to be sent, processed, and received in real-time. Achieving this paves the way for a smooth bidding process where users can interact with events in real-time without interruptions, ultimately enhancing the functionality of the app.

NR-Digital-Auction is a fantastic example of how innovations can be brought to life by using Redis. Every day, programmers around the world are experimenting with Redis to build applications that impact everyday life, and you can too!

So what can you build with Redis? For more inspiration, you can head over to the Redis Launchpad to access an exciting range of applications. If you're ready to get started building, quickly spin up a free Redis Enterprise Cloud database.

- + \ No newline at end of file diff --git a/create/aws/chatapp/index.html b/create/aws/chatapp/index.html index 830cbc1e28..ab06236f78 100644 --- a/create/aws/chatapp/index.html +++ b/create/aws/chatapp/index.html @@ -4,7 +4,7 @@ How to Build a Real Time Chat application on Amazon Web Services using Python and Redis | The Home of Redis Developers - + @@ -19,7 +19,7 @@ User data is stored in a hash set where each user entry contains the next values:

  • username: unique user name;

  • password: hashed password

  • Additionally, a set of rooms is associated with each user

  • Rooms are sorted sets containing messages, where the score is the timestamp of each message

  • Each room has a name associated with it

  • The online users set is global and is used to keep track of which users are online.

  • The user hash set is accessed by the key user:{userId}. Its data is stored with HSET key field data. The user ID is generated by incrementing the total_users key (INCR total_users); see the example after this list.

  • The username is stored as a separate key (username:{username}) that maps to the userId for quicker access, stored with SET username:{username} {userId}.

  • Rooms a user belongs to are stored at user:{userId}:rooms as a set of room IDs. A room is added with the SADD user:{userId}:rooms {roomId} command.

  • Messages are stored at the room:{roomId} key in a sorted set (as mentioned above). They are added with the ZADD room:{roomId} {timestamp} {message} command. Each message is serialized to an app-specific JSON string.
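
To make the model concrete, here is a hypothetical redis-cli session that creates a new user following the scheme above (the username, password, and IDs are invented for illustration):

 INCR total_users
(integer) 3
 HSET user:3 username "alice" password "<hashed password>"
(integer) 2
 SET username:alice 3
OK
 SADD user:3:rooms 0
(integer) 1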

Step 9. How the data is accessed

  • Get a user: HGETALL user:{id}. Example:

 HGETALL user:2

This returns the data for the user with ID 2.

  • Online users: SMEMBERS online_users. This returns the IDs of users who are currently online.

  • Get room ids of a user: SMEMBERS user:{id}:rooms. Example:

 SMEMBERS user:2:rooms

This returns the room IDs for the user with ID 2.

  • Get a list of messages: ZREVRANGE room:{roomId} {offset_start} {offset_end}. Example:
 ZREVRANGE room:1:2 0 50

This returns the most recent messages (offsets 0 through 50) for the private room between users with IDs 1 and 2.

Further References

- + \ No newline at end of file diff --git a/create/aws/import/database-migration-aws-elasticache-redis-enterprise-cloud/index.html b/create/aws/import/database-migration-aws-elasticache-redis-enterprise-cloud/index.html index 0330841f5c..d1a4b677f8 100644 --- a/create/aws/import/database-migration-aws-elasticache-redis-enterprise-cloud/index.html +++ b/create/aws/import/database-migration-aws-elasticache-redis-enterprise-cloud/index.html @@ -4,7 +4,7 @@ How to migrate your database from AWS ElastiCache to Redis without any downtime | The Home of Redis Developers - + @@ -12,7 +12,7 @@

How to migrate your database from AWS ElastiCache to Redis without any downtime


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

Most of the database migration tools available today are offline in nature. They are complex and require manual intervention.

If you want to migrate your data from Amazon ElastiCache to Redis Enterprise Cloud, for example, the usual process is to back up your ElastiCache data to an Amazon S3 bucket and then import your data using the Redis Enterprise Cloud UI. This process can require painful downtime and could result in data loss. Other available techniques include creating point-in-time snapshots of the source Redis server and applying the changes to the destination servers to keep both the servers in sync. That might sound like a good approach, but it can be challenging when you have to maintain dozens of scripts to implement the migration strategy.

So we’ve come up with a different approach:

Introducing RIOT

image

RIOT is an open source online migration tool built by Julien Ruaux, a Solution Architect at Redis. RIOT implements client-side replication using a producer/consumer approach. The producer is the combination of the key and value readers that have a connection to ElastiCache. The key reader component identifies keys to be replicated using scan and keyspace notifications. For each key, the value reader component performs a DUMP and hands the resulting key and bytes to the consumer (writer), which performs a RESTORE on the Redis Enterprise connection. A simplified sketch of this per-key exchange follows.
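
Conceptually, the per-key exchange boils down to the following pair of Redis commands (a simplified illustration; RIOT batches and pipelines this work over its connections):

 # On the source (ElastiCache): serialize the key's value
 DUMP mykey
 # On the target (Redis Enterprise Cloud): recreate the key from the serialized bytes
 RESTORE mykey 0 "<serialized bytes>"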

This blog post will show how to perform a seamless online migration of databases from ElastiCache to Redis Enterprise Cloud.

Prerequisites:

You will require a few resources to use the migration tool:

  • A Redis Enterprise Cloud subscription, sign up here
  • Amazon ElastiCache (a primary endpoint in the case of a single-master EC and a configuration endpoint in the case of a clustered EC: Refer to Finding Connection Endpoints on the ElastiCache documentation to learn more)
  • An Amazon EC2 instance based on Linux

Step 1 - Setting up an Amazon EC2 instance

You can either create a new EC2 instance or leverage an existing one. In our example, we will first create an instance on Amazon Web Services (AWS). The most common scenario is to access an ElastiCache cluster from an Amazon EC2 instance in the same Amazon Virtual Private Cloud (Amazon VPC). We have used Ubuntu 16.04 LTS for this setup, but you can choose the Ubuntu or Debian distribution of your choice.

Use SSH to connect to this new EC2 instance from your computer as shown here:

ssh -i "<private key file>" <AWS EC2 instance>

Step 2 - Install the redis-cli tool

$ sudo apt update
$ sudo apt install -y redis-tools

Verify the connectivity with the ElastiCache database

Syntax:

$ redis-cli -h <ElastiCache primary endpoint> -p 6379

Command:

$ sudo redis-cli -h <elasticache primary endpoint> -p 6379

Ensure that the above command allows you to connect to the remote Redis database successfully.

Step 3 - Using the RIOT migration tool

Run the commands below to set up the migration tool.

Prerequisites:

Install Java

We recommend using OpenJDK 11 or later:

sudo add-apt-repository ppa:openjdk-r/ppa && sudo apt-get update -q && sudo apt install -y openjdk-11-jdk

Installing RIOT

Download the package, unzip it, and make sure the RIOT binaries are in place, as shown here:

wget https://github.com/Redislabs-Solution-Architects/riot/releases/download/v2.0.8/riot-redis-2.0.8.zip
unzip riot-redis-2.0.8.zip
cd riot-redis-2.0.8/bin/

You can check the version of RIOT by running the command below:

./riot-redis --version
RIOT version "2.0.8"
bin/riot-redis --help
Usage: riot-redis [OPTIONS] [COMMAND]
-q, --quiet Log errors only
-d, --debug Log in debug mode (includes normal stacktrace)
-i, --info Set log level to info
-h, --help Show this help message and exit.
-V, --version Print version information and exit.
Redis connection options
-r, --redis=<uri> Redis connection string (default: redis://localhost:6379)
-c, --cluster Connect to a Redis Cluster
-m, --metrics Show metrics
-p, --pool=<int> Max pool connections (default: 8)
Commands:
replicate, r Replicate a source Redis database in a target Redis database
info, i Display INFO command output
latency, l Calculate latency stats
ping, p Execute PING command

Once Java and RIOT are installed, we are all set to begin the migration process with the command below, which replicates data directly from the source (ElastiCache) to the target (Redis Enterprise Cloud).

Step 4 - Migrate the data

Finally, it’s time to replicate the data from ElastiCache to Redis Enterprise Cloud by running the following command:

sudo ./riot-redis -r redis://<source Elasticache endpoint>:6379 replicate -r redis://password@<Redis Enterprise Cloud endpoint>:port --live

ElastiCache can be configured in two ways: clustered and non-clustered. The command above covers the non-clustered scenario; the clustered scenario, with a specific database namespace, requires a slightly different invocation:

Whenever you have a clustered ElastiCache deployment, you need to pass the --cluster option before specifying the source ElastiCache endpoint, for example:
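
The following invocation is a sketch based on the --cluster connection option shown in the help output above; verify the exact flag placement against the RIOT documentation for your version:

 sudo ./riot-redis -r redis://<source ElastiCache configuration endpoint>:6379 --cluster replicate -r redis://password@<Redis Enterprise Cloud endpoint>:port --live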

Important notes

  • Perform user acceptance testing of the migration before using it in production.
  • Once the migration is complete, ensure that application traffic gets successfully redirected to the Redis Enterprise endpoint.
  • Perform the migration process during a period of low traffic to minimize the chance of data loss.

Conclusion

If you’re looking for a simple and easy-to-use live migration tool that can help you move data from Amazon ElastiCache to Redis Enterprise Cloud with no downtime, RIOT is a promising option.

- + \ No newline at end of file diff --git a/create/aws/index.html b/create/aws/index.html index 1a9936f1c4..5bea90a145 100644 --- a/create/aws/index.html +++ b/create/aws/index.html @@ -4,7 +4,7 @@ Overview | The Home of Redis Developers - + @@ -12,7 +12,7 @@

Overview

The following links provide you with the available options to run apps on AWS using Redis:

Create a Redis database on AWS
Building a Slack Bot to Retrieve Lost Files Using AWS S3 and Search
Using Terraform to Deploy and Manage Redis Database on AWS
Learn how to build a Real-Time Bidding Platform using NodeJS, AWS Lambda and Redis
Migrating Your Database from AWS ElastiCache to Redis Using the RIOT Tool
Building a Real-Time Chat application on AWS using Flask and Redis
Learn how to Build and Deploy Your Own Analytics Dashboard using NodeJS, AWS Lambda and Redis
- + \ No newline at end of file diff --git a/create/aws/redis-on-aws/images/index.html b/create/aws/redis-on-aws/images/index.html index 728fc53401..4bb8545423 100644 --- a/create/aws/redis-on-aws/images/index.html +++ b/create/aws/redis-on-aws/images/index.html @@ -4,7 +4,7 @@ images | The Home of Redis Developers - + @@ -12,7 +12,7 @@
- + \ No newline at end of file diff --git a/create/aws/redis-on-aws/index.html b/create/aws/redis-on-aws/index.html index 1ec48986e7..9e163a44d2 100644 --- a/create/aws/redis-on-aws/index.html +++ b/create/aws/redis-on-aws/index.html @@ -4,7 +4,7 @@ Create Redis database on AWS | The Home of Redis Developers - + @@ -12,7 +12,7 @@

Create Redis database on AWS


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

Redis Enterprise Cloud on AWS is a fully managed Redis Enterprise as a service. Designed for modern distributed applications, Redis Enterprise Cloud on AWS is known for its high performance, infinite scalability and true high availability.

Follow the steps below to set up Redis Enterprise Cloud hosted on AWS:

Step 1. Create free cloud account

Create your free Redis Enterprise Cloud account. Once you click on “Get Started”, you will receive an email with a link to activate your account and complete your signup process.

tip

For a limited time, use TIGER200 to get $200 credits on Redis Enterprise Cloud and try all the advanced capabilities!

🎉 Click here to sign up

My Image

Step 2. Create Your subscription

Next, you will have to create a Redis Enterprise Cloud subscription. In the Redis Enterprise Cloud menu, click "Create your Subscription".

My Image

Step 3. Select the right Subscription Plan

Select the "Fixed Plan", which suits low-throughput applications and is sufficient for now.

My Image

Step 4. Select cloud vendor

For the cloud provider, select your preferred cloud (for demo purposes).

My Image

Step 5. Click "Create Subscription"

Finally, click on the "Create Subscription" button.

My Image

You can now verify the subscription as shown below:

My Image

Step 6. Create database

Click "Create Database". Enter a database name and select your preferred module.

My Image

Step 7. Launch database

Click "Activate" and wait a few seconds for the database to become active. Once fully activated, you will see the database endpoints as shown below:

My Image

Next Steps

Redis Launchpad
- + \ No newline at end of file diff --git a/create/aws/slackbot/index.html b/create/aws/slackbot/index.html index f1a6c26ad2..6f9e3ec9ca 100644 --- a/create/aws/slackbot/index.html +++ b/create/aws/slackbot/index.html @@ -4,7 +4,7 @@ How to Build a Slack Bot to Retrieve Lost Files Using AWS S3 and Redis Search and Query Engine | The Home of Redis Developers - + @@ -13,7 +13,7 @@

How to Build a Slack Bot to Retrieve Lost Files Using AWS S3 and Redis Search and Query Engine


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

alt_text

If you work remotely then you’re likely to have come across Slack at some point. And if you use Slack on a daily basis, then you’ll be all too aware of how easy it can be to lose files. Being pinged every day by different employees across different channels makes it difficult to keep track of files.

Eventually, you may be forced to rummage through a cluttered library of documents in search of that one crucial document that’s needed for a task, report or even a meeting. We’ve all been there, and the frustration is just as real as it is irritating... which is why this Launchpad app was created to remove this impediment.

It was a tricky application to make, requiring a lot of attention to detail across a number of different components for it to come to fruition. However, the success of this application was possible due to Redis’ ability to extract and process data proficiently.

Thanks to Redis, all of the components functioned harmoniously with one another, creating a fully optimal application that interacts with users in real-time.

Let’s take a look at how this was done. Before we dive in, we’d also like to point out that we have a variety of exciting applications for you to check out in our Redis Launchpad.

alt_text

  1. What will you build?
  2. What will you need?
  3. Architecture overview
  4. Getting started
  5. How it works

Step 1. What will you build?

You’ll build a special search engine that’s designed to retrieve lost files in Slack. This is especially handy for people who work from home, where documents between channels are easily lost between employees.

Below we’ll go through each step chronologically and highlight what components are required to create this application.

Ready to get started? OK, let’s dive straight in.

Step 2. What will you need?

  • Slack: used as an instant messaging app that connects employees with one another.
  • Slack Block Kit: used as a UI framework for Slack apps that offers a balance of control and flexibility when building experiences.
  • Python: the preferred programming language to connect Redis in the application.
  • Redis Stack: includes a built-in Search and Query feature that provides querying, secondary indexing and full-text search.
  • S3 bucket: used as a public cloud storage resource in Amazon Web Services (AWS).
  • AWS Textract: used as a machine learning service that automatically extracts text.
  • Nodejs: responsible for image generation.

Step 3. Architecture

Let’s look at each of the components that create the Reeko-Slack bot:

alt_text

1. file_shared

  • When a new file is shared in any public slack channel the file_share event is sent to the Slack Bot app.
  • The file name is added as a suggestion using the FT.SUGADD command in Redis.
  • All file data is added using the JSON.SET command.
  • The file is then stored on the S3 bucket as an object with the key as the filename. A command-level sketch of this flow follows.
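
At the Redis level, the first two steps of this flow correspond roughly to commands like the following (the JSON fields shown are an assumption based on the index schema presented later in this guide):

 FT.SUGADD file-index "amazon-shareholder-letter.pdf" 1
 JSON.SET amazonshareholderletterpdf . '{"file_name": "amazon-shareholder-letter.pdf", "filetype": "pdf"}'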

2. S3-get

  • The JSON.GET command checks whether the desired file exists.
  • The file will then be retrieved from the S3 bucket if found.
  • The FT.SEARCH command uses the Redis Search and Query engine to look for documents in the S3 bucket. Users will be prompted with different file name suggestions based on what they’ve typed in the search bar.
  • Once the user chooses one of the file suggestions, it is then downloaded and sent back to Slack.

3. S3-delete

  • User types the file name from the command['text'] parameter
  • The file data is deleted from Redis using the JSON.DEL command and is also removed from Redis's suggestions using the FT.SUGDEL command.

4. Summarise-document

  • The file name is identified from the command['text'] parameter.
  • It is then retrieved from the S3 bucket through the JSON.GET command.
  • Users can either download the pdf or png file locally from the S3 bucket.
  • The text is extracted using AWS Textract.
  • The extracted text is then summarised using the Hugging Face transformers summarization pipeline. The text summary is also added back to the JSON document using the JSON.SET command.
  • A post request is then sent to the /create-image on the NodeJS backend with the file name and summary text.
  • An image is generated using a base template.
  • The image that is returned is saved to the S3 bucket and sent back to Slack.
  • The image URL is also added to the JSON document using JSON.SET command.

What is an S3 bucket?

Amazon S3 is a simple storage service from Amazon. It allows users to store objects through a web service interface. The product’s value comes from its ability to store, protect and retrieve data from ‘buckets’ at any time, from anywhere, and on any device.

Step 4. Getting started

Prerequisites

  • Python 3.6+
  • ngrok
  • AWS Account
  • Slack
  • Docker

1. Run Redis Docker container

This simple container image bundles together the latest stable releases of Redis and select Redis modules from Redis Labs. This image is based on the official image of Redis from Docker. By default, the container starts with Redis' default configuration and all included modules loaded.

 docker run -d -p 6379:6379 redis/redis-stack
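
Before moving on, you can check that the container is accepting connections (assuming redis-cli is installed locally):

 redis-cli -h localhost -p 6379 ping
PONG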

2. Setup a Python environment

To test the integration, Python needs to be installed on your computer. You can get a suitable release from here. To check your Python version, follow the command below.

 # Python 3.6+ required
git clone https://github.com/redis-developer/Reeko-Slack-Bot
cd Reeko-Slack-Bot
python3 -m venv env
source env/bin/activate
cd python-backend
pip3 install -r requirements.txt

3. Using ngrok as a local proxy

To develop locally we'll be using ngrok. This will allow you to expose a public endpoint that Slack can use to send your app events. If you haven't already, install ngrok from their website.

ngrok exposes local networked services behind NATs and firewalls to the public internet over a secure tunnel, letting you share local websites, build and test webhook consumers, and self-host personal services.

4. Setting up an AWS Account

For testing you need a verified AWS account. You can find your credentials file at ~/.aws/credentials (C:\Users\USER_NAME\.aws\credentials for Windows users) and copy the following lines into the .env file.

Also make sure to add your S3 bucket name in the .env file.

 AWS_ACCESS_KEY_ID="YOUR_ACCESS_KEY_ID"
AWS_SECRET_ACCESS_KEY="YOUR_SECRET_ACCESS_KEY"
BUCKET_NAME="YOUR_BUCKET_NAME"

5. Install Slack on your local system

Slack needs to be installed on your computer. If it hasn’t been installed, you can get it from here for Windows or Mac. If you don’t already have an account you can make one here.

To get started, you'll need to create a new Slack app by clicking on the following link: https://api.slack.com/apps. The foundational framework we’ll be using is Bolt. This will make it easier to build Slack apps with the platform’s latest features.

  1. Click on the Create an App button
  2. Name the application ‘Reeko’ and choose the development workspace.

alt_text

alt_text

  3. Requesting scopes - Scopes give your app permission to do things (for example, post messages) in your development workspace. You can select the scopes to add to your app by navigating over to the OAuth & Permissions sidebar.
  4. Add the Bot Token Scopes by clicking on the Add an OAuth Scope button:
  • channels:history: View messages and other content in public channels that reeko has been added to
  • channels:join: Join public channels in a workspace
  • channels:read: View basic information about public channels in a workspace
  • chat:write: Send messages as @reeko
  • chat:write.customize: Send messages as @reeko with a customized username and avatar
  • chat:write.public: Send messages to channels @reeko isn't a member of
  • files:read: View files shared in channels and conversations that reeko has been added to
  • files:write: Upload, edit, and delete files as reeko
  5. Add the User Token Scopes by clicking on the Add an OAuth Scope button:
  • channels:history: View messages and other content in public channels that reeko has been added to.
  • files:read: View files shared in channels and conversations that reeko has been added to.
  6. Install your own app by selecting the Install App button at the top of the OAuth & Permissions page, or from the sidebar.
  7. After clicking through one more green Install App To Workspace button, you'll be sent through the Slack OAuth UI.
  8. After installation, you'll land back on the OAuth & Permissions page and find a Bot User OAuth Access Token and a User OAuth Token. Click on the copy button for each of them. These tokens need to be added to the .env file. (The bot token starts with xoxb, whereas the user token is longer and starts with xoxp.)
SLACK_USER_TOKEN=xoxp-your-user-token
SLACK_BOT_TOKEN=xoxb-your-bot-token

alt_text

  9. As well as the access token, you'll need a signing secret. Your app's signing secret verifies that incoming requests are coming from Slack. Navigate to the Basic Information page from your app management page. Under App Credentials, copy the value for Signing Secret and add it to the .env file.
SLACK_SIGNING_SECRET=your-signing-secret

alt_text

  10. Make sure you have followed the steps in Cloning the repo to start the Bolt app. The HTTP server uses a built-in development adapter, which is responsible for handling and parsing incoming events from Slack on port 3000.
python3 app.py

alt_text

Open a new terminal and ensure that you've installed ngrok. Make sure to tell ngrok to use port 3000 (which Bolt for Python uses by default):

ngrok http 3000

alt_text

For local slack development, we'll use your ngrok URL from above, so copy it to your clipboard.

https://your-own-url.ngrok.io
  11. Now we’re going to subscribe to events. Your app can listen to all sorts of events that are happening around your workspace - messages being posted, files being shared and more. On your app configuration page, select the Event Subscriptions sidebar. You'll be presented with an input box to enter a Request URL, which is where Slack sends the events your app is subscribed to. Hit the save button.

By default Bolt for Python listens for all incoming requests at the /slack/events route, so for the Request URL you can enter your ngrok URL appended with /slack/events:

https://your-own-url.ngrok.io/slack/events

If the challenge was successful, you’ll get “verified” right next to the Request URL.

alt_text

On the same page click on the Subscribe to bot events menu that sits at the bottom of the page. Click on the Add Bot User Event.

Similarly, click on Subscribe to events on behalf of the user but then click on Add Workspace Event.

Add the following scopes

EventNameDescriptionRequired Scope
file_shareA file was sharedfiles:read
message.channelsA message was posted to a channelchannels: history

alt_text

  12. Select the Interactivity & Shortcuts sidebar and toggle the switch on. Again, for the Request URL, enter your ngrok URL appended with /slack/events:
https://your-own-url.ngrok.io/slack/events

alt_text

  13. Scroll down to the Select Menus section and, in the Options Load URL, enter your ngrok URL appended with /slack/events:
https://your-own-url.ngrok.io/slack/events

alt_text

  14. Finally we come to the slash commands. Slack's custom slash commands perform a very simple task. First they take whatever text you enter after the command itself (along with some other predefined values). Next, they send it to a URL and accept whatever the script returns. After this, Slack posts it as a Slackbot message to the person who issued the command. We have four slash commands to add in the workspace.

Visit the Slash Commands sidebar and click on the Create New Command button to head over the Create New Command page. Add the Command, Request URL, Short Description and Usage hint, according to the table provided below.

Click on Save to return to the Slash Commands.

  • /s3-get (Request URL: https://your-own-url.ngrok.io/slack/events): Get a file from the S3 bucket. Usage hint: filename
  • /s3-search (Request URL: https://your-own-url.ngrok.io/slack/events): Search for a file in S3.
  • /s3-delete (Request URL: https://your-own-url.ngrok.io/slack/events): Delete the given file from the S3 bucket. Usage hint: filename
  • /summarise-document (Request URL: https://your-own-url.ngrok.io/slack/events): Summarise a document. Usage hint: filename

alt_text

alt_text

alt_text

alt_text

alt_text

  15. Open the Slack channel and upload a file in any channel. Make sure to note the file name.

6. Setting up a NodeJS backend

Requirements

Getting Started:

alt_text

GraphicsMagick

GraphicsMagick is a highly versatile piece of software used for image processing. To generate images, you need to have GraphicsMagick installed on your machine.

You can find the suitable release from http://www.graphicsmagick.org/download.html#download-sites.

alt_text

Nodejs

note

Please follow all the steps in python-backend/README.md first.

Copy the AWS credentials from the python-backend/.env to the config.json file.

{
"accessKeyId": "",
"secretAccessKey": "",
"region": ""
}

Install all the packages and run the server.

npm install
npm start

alt_text

Step 7: Connecting Slack to S3

This step involves bringing your AWS S3 bucket to your Slack account. Doing this will allow you to upload, download or delete files from your workspace without writing a single line of code.

/s3-get filename

The purpose of this command is to retrieve a specified file from the S3 bucket. Once you type in the name of the file in the search bar, Reeko will check whether this document exists. If the document doesn’t exist then it will return as false and nothing will be done.

If the file is found, then the JSON.GET command will capture its name and download it from the S3 bucket. The downloaded file is sent back as a direct message in Slack.

JSON.GET amazonshareholderletterpdf

alt_text

/s3-delete filename

This command involves deleting files from the S3 bucket. To achieve this you simply need to type in the file name in the search bar and Reeko will pull up the file as demonstrated below.

You’ll have the option to permanently delete the file from the S3 bucket. The file data is deleted from Redis using the JSON.DEL command and is removed from Search suggestions using the FT.SUGDEL command. You’ll be informed when the file is deleted.

FT.SUGDEL file-index "amazon-shareholder-letter.pdf"

JSON.DEL amazonshareholderletterpdf

Step 8: File searching

Have you ever searched for a file without being entirely sure what it is you’re looking for? You may remember snippets of the content but not enough to manually track down its location. Well, thanks to Search’s autocomplete functionality, this will no longer be a problem.

This command first opens up a modal inside of Slack with a search bar. Different file names will then be suggested based on the text you’ve typed. The way this works is simple:

Let’s assume the bucket has documents called abc.csv, abcd.csv and abcdef.csv. If you type abc into the search bar, you’ll get these three results as a list from the FT.SEARCH command. From here you’ll be able to select the file you’re looking for. Once selected, the file is downloaded and sent back to Slack.

FT.SEARCH file-index "ama"

Step 9: Document summarization

In this step, Reeko will extract all of the text from the documents and summarize the content of each one with an image. This will prevent you from having to tediously open each document to get access to important information. Here’s how to do it:

  1. Get the file name from the command['text'] parameter.
  2. If the file is found, retrieve its data by using the JSON.GET command.
 JSON.GET amazonshareholderletterpdf
  3. Download the pdf or png file locally from the S3 bucket.
  4. Extract the text using AWS Textract.
  5. The extracted text is summarised using the Hugging Face transformers summarisation pipeline. The text summary is also added back to the JSON document using the JSON.SET command.
 JSON.SET amazonshareholderletterpdf .summary ' Amazon has grown from having 158 employees to 614. We had just gone public at a split-adjusted stock price of $1.50 per share. In 1997, we hadn't invented Prime, Marketplace, Alexa, or AWS. If you want to be successful in business, you have to create more than you consume. Your goal should be to create value for everyone you interact with. Stock prices are not about the past. They are a prediction of future cash flows discounted back to the present.'
  6. A post request is then sent to /create-image on the NodeJS backend with the file name and summary text.
  7. An image is generated using a base template.
  8. The image that is returned is saved to the S3 bucket and sent back to Slack.
  9. The image URL is also added to the JSON document using the JSON.SET command.
 JSON.SET amazonshareholderletterpdf .file_path 'https://bucket-1234.s3.amazonaws.com/b8bac45f-7f69-4c28-a26e-9888d9771bed-image.png'

Below we’ve used the Amazon 2020 shareholder letter as an example of how a document can be summarized using Reeko.

5. How it works

The Slack app is built using Bolt for Python framework. To connect the AWS S3 bucket and AWS Textract, use their respective boto3 clients.

Slack is receptive to all events around your workspace such as messages being posted, files being shared, users joining the team, and more. To listen to events, Slack uses the Events API. And to enable custom interactivity, you can use the Block Kit.

Slash commands work in the following way. First they take the text you enter after the command itself and send it to a URL. They then accept whatever the script returns and post it as a Slackbot message to the person who issued the command. There’s a set of four slash commands that make up the Slack bot.

In the application there are two Redis modules: Search (used in redisearch_connector.py) and JSON (used in redisjson_connector.py).

The code below is used to initialize the Search client in redisearch_connector.py, pointing at an index with the name file_index.

from redisearch import Client, TextField, AutoCompleter, Suggestion

class RedisSearchConnector():
    def __init__(self):
        self.index_name = 'file_index'
        self.client = Client(self.index_name)
        self.ac = AutoCompleter(self.index_name)

Use the following code to initialise Redis JSON in redisjson_connector.py.

from rejson import Client, Path

class RedisJsonConnector():
    def __init__(self):
        self.rj = Client(decode_responses=True)

And the code below is used to create an index in Redis

FT.CREATE file-index ON HASH SCHEMA file_name TEXT SORTABLE file_id TEXT created TEXT timestamp TEXT mimetype TEXT filetype TEXT user_id TEXT size

Conclusion: preventing lost files with Redis

The advanced capabilities of Redis allowed this Launchpad App to create an invaluable asset to remote workers - to never lose a file again on Slack. Redis Stack offered a simple yet effective way of transmitting data to and from the S3 bucket with no lags, no pauses and no delays whatsoever. You can discover more about the ins and outs of how this app was made by simply clicking here.

Reeko is an innovative application that joins our exciting collection of apps that we currently have on the Redis Launchpad. By using Redis, programmers from all over the world are creating breakthrough applications that are having an impact on daily lives… and you can too.

So how can you change the world using Redis? For more inspiration, make sure to check out the other applications we have on our Launchpad.

alt_text

Who built this application?

Sarthak Arora

Being only 20 years old, Sarthak is a young yet highly advanced programmer who’s already a two-time international hackathon winner.

To discover more about his work and his activity on GitHub, you can check out his profile here.

References

- + \ No newline at end of file diff --git a/create/aws/terraform/index.html b/create/aws/terraform/index.html index eaae4481bf..d3dd90654f 100644 --- a/create/aws/terraform/index.html +++ b/create/aws/terraform/index.html @@ -4,7 +4,7 @@ How to Deploy and Manage Redis Database on AWS Using Terraform | The Home of Redis Developers - + @@ -18,7 +18,7 @@ Within the block body (between { and }) are query constraints defined by the data source. Most arguments in this section depend on the data source, and indeed in this example card_type and last_four_numbers are all arguments defined specifically for the rediscloud_payment_method data source.

Configure Redis Enterprise Cloud programmatic access

In order to set up authentication with the Redis Enterprise Cloud provider, a programmatic API key must be generated for Redis Enterprise Cloud. The Redis Enterprise Cloud documentation contains the most up-to-date instructions for creating and managing your key(s) and IP access.

note

Flexible and Annual Redis Enterprise Cloud subscriptions can leverage a RESTful API that permits operations against a variety of resources, including servers, services, and related infrastructure. The REST API is not supported for Fixed or Free subscriptions.

 provider "rediscloud" {
 }

 # Example resource configuration
 resource "rediscloud_subscription" "example" {
   # ...
 }

Prerequisites:

  • Install Terraform on MacOS.
  • Create a free Redis Enterprise Cloud account.
  • Create your first subscription.
  • Enable API

Step 1: Install Terraform on MacOS

Use Homebrew to install Terraform on MacOS as shown below:

 brew install terraform

Step 2: Sign up for a free Redis Enterprise Cloud account

Follow this tutorial to sign up for a free Redis Enterprise Cloud account.

Redis Cloud

Step 3: Enable Redis Enterprise Cloud API

If you have a Flexible (or Annual) Redis Enterprise Cloud subscription, you can use a REST API to manage your subscription programmatically. The Redis Cloud REST API is available only to Flexible or Annual subscriptions. It is not supported for Fixed or Free subscriptions.

For security reasons, the Redis Cloud API is disabled by default. To enable the API: Sign in to your Redis Cloud subscription as an account owner. From the menu, choose Access Management.

When the Access Management screen appears, select the API Keys tab.

Terraform

If a Copy button appears to the right of the API account key, the API is enabled. This button copies the account key to the clipboard.

If you see an Enable API button, select it to enable the API and generate your API account key.

To authenticate REST API calls, you combine the API account key with an API user key, for example by exporting them as environment variables as shown below.
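
A minimal sketch, assuming the environment variable names commonly documented for the rediscloud provider (verify against the provider documentation):

 export REDISCLOUD_ACCESS_KEY="<your API account key>"
 export REDISCLOUD_SECRET_KEY="<your API user key>"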

Terraform

Step 4: Create a main.tf file

It’s time to create an empty “main.tf” file and start adding the provider, resource and data sources as shown below:

 terraform {
   required_providers {
     rediscloud = {
       source  = "RedisLabs/rediscloud"
       version = "0.2.2"
     }
   }
 }

 # Provide your credit card details
 data "rediscloud_payment_method" "card" {
   card_type         = "Visa"
   last_four_numbers = "XXXX"
 }

 # Generates a random password for the database
 resource "random_password" "passwords" {
   count   = 2
   length  = 20
   upper   = true
   lower   = true
   number  = true
   special = false
 }

 resource "rediscloud_subscription" "rahul-test-terraform" {
   name              = "rahul-test-terraform"
   payment_method_id = data.rediscloud_payment_method.card.id
   memory_storage    = "ram"

   cloud_provider {
     provider         = "AWS"
     cloud_account_id = 1

     region {
       region                       = "us-east-1"
       networking_deployment_cidr   = "10.0.0.0/24"
       preferred_availability_zones = ["us-east-1a"]
     }
   }

   database {
     name               = "db-json"
     protocol           = "redis"
     memory_limit_in_gb = 1
     replication        = true
     data_persistence   = "aof-every-1-second"

     module {
       name = "RedisJSON"
     }

     throughput_measurement_by    = "operations-per-second"
     throughput_measurement_value = 10000
     password                     = random_password.passwords[1].result
   }
 }

Step 5: Create an execution plan

The Terraform plan command creates an execution plan, which lets you preview the changes that Terraform plans to make to your infrastructure. By default, when Terraform creates a plan, it reads the current state of any already existing remote objects to make sure that Terraform state is up to date. It then compares the current configuration to the prior state and then proposes a set of change actions that should make the remote object match the configuration.

 % terraform plan


Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols:
+ create

Terraform will perform the following actions:

# random_password.passwords[0] will be created
+ resource "random_password" "passwords" {
+ id = (known after apply)
+ length = 20
+ lower = true
+ min_lower = 0
+ min_numeric = 0
+ min_special = 0
+ min_upper = 0
+ number = true
+ result = (sensitive value)
+ special = false
+ upper = true
}

# random_password.passwords[1] will be created
+ resource "random_password" "passwords" {
+ id = (known after apply)
+ length = 20
+ lower = true
+ min_lower = 0
+ min_numeric = 0
+ min_special = 0
+ min_upper = 0
+ number = true
+ result = (sensitive value)
+ special = false
+ upper = true
}

# rediscloud_subscription.rahul-test-terraform will be created
+ resource "rediscloud_subscription" "rahul-test-terraform" {
+ id = (known after apply)
+ memory_storage = "ram"
+ name = "rahul-test-terraform"
+ payment_method_id = "XXXX"
+ persistent_storage_encryption = true

+ cloud_provider {
+ cloud_account_id = "1"
+ provider = "AWS"

+ region {
+ multiple_availability_zones = false
+ networking_deployment_cidr = "10.0.0.0/24"
+ networks = (known after apply)
+ preferred_availability_zones = [
+ "us-east-1a",
]
+ region = "us-east-1"
}
}

+ database {
# At least one attribute in this block is (or was) sensitive,
# so its contents will not be displayed.
}
}

Plan: 3 to add, 0 to change, 0 to destroy.

───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────

Note: You didn't use the -out option to save this plan, so Terraform can't guarantee to take exactly these actions if you run "terraform apply" now.

Step 6: Execute the action

The Terraform apply command executes the actions proposed in a Terraform plan.

 terraform apply


Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols:
+ create

Terraform will perform the following actions:

# random_password.passwords[0] will be created
+ resource "random_password" "passwords" {
+ id = (known after apply)
+ length = 20
+ lower = true
+ min_lower = 0
+ min_numeric = 0
+ min_special = 0
+ min_upper = 0
+ number = true
+ result = (sensitive value)
+ special = false
+ upper = true
}

# random_password.passwords[1] will be created
+ resource "random_password" "passwords" {
+ id = (known after apply)
+ length = 20
+ lower = true
+ min_lower = 0
+ min_numeric = 0
+ min_special = 0
+ min_upper = 0
+ number = true
+ result = (sensitive value)
+ special = false
+ upper = true
}

# rediscloud_subscription.rahul-test-terraform will be created
+ resource "rediscloud_subscription" "rahul-test-terraform" {
+ id = (known after apply)
+ memory_storage = "ram"
+ name = "rahul-test-terraform"
+ payment_method_id = "XXXX"
+ persistent_storage_encryption = true

+ cloud_provider {
+ cloud_account_id = "1"
+ provider = "AWS"

+ region {
+ multiple_availability_zones = false
+ networking_deployment_cidr = "10.0.0.0/24"
+ networks = (known after apply)
+ preferred_availability_zones = [
+ "us-east-1a",
]
+ region = "us-east-1"
}
}

+ database {
# At least one attribute in this block is (or was) sensitive,
# so its contents will not be displayed.
}
}

Plan: 3 to add, 0 to change, 0 to destroy.

Do you want to perform these actions?
Terraform will perform the actions described above.
Only 'yes' will be accepted to approve.

Enter a value: yes

random_password.passwords[0]: Creating...
random_password.passwords[1]: Creating...
random_password.passwords[1]: Creation complete after 0s [id=none]
random_password.passwords[0]: Creation complete after 0s [id=none]
rediscloud_subscription.rahul-test-terraform: Creating...
rediscloud_subscription.rahul-test-terraform: Still creating... [10s elapsed]
rediscloud_subscription.rahul-test-terraform: Still creating... [20s elapsed]
rediscloud_subscription.rahul-test-terraform: Creation complete after 8m32s [id=1649277]

Apply complete! Resources: 3 added, 0 changed, 0 destroyed.

Step 7: Verify the database

You can now verify the new database named “db-json” created under the subscription.

Here is the complete template to deploy a Redis database with the RedisJSON module on AWS using Terraform:

terraform {
  required_providers {
    rediscloud = {
      source  = "RedisLabs/rediscloud"
      version = "0.2.2"
    }
  }
}

# Provide your credit card details
data "rediscloud_payment_method" "card" {
  card_type         = "Visa"
  last_four_numbers = "XXXX"
}

# Generates a random password for the database
resource "random_password" "passwords" {
  count   = 2
  length  = 20
  upper   = true
  lower   = true
  number  = true
  special = false
}

resource "rediscloud_subscription" "rahul-test-terraform" {
  name              = "rahul-test-terraform"
  payment_method_id = data.rediscloud_payment_method.card.id
  memory_storage    = "ram"

  cloud_provider {
    provider         = "AWS"
    cloud_account_id = 1

    region {
      region                       = "us-east-1"
      networking_deployment_cidr   = "10.0.0.0/24"
      preferred_availability_zones = ["us-east-1a"]
    }
  }

  database {
    name               = "db-json"
    protocol           = "redis"
    memory_limit_in_gb = 1
    replication        = true
    data_persistence   = "aof-every-1-second"

    module {
      name = "RedisJSON"
    }

    throughput_measurement_by    = "operations-per-second"
    throughput_measurement_value = 10000
    password                     = random_password.passwords[1].result
  }
}

Step 8: Cleanup

The Terraform destroy command is a convenient way to destroy all remote objects managed by a particular Terraform configuration. While you will typically not want to destroy long-lived objects in a production environment, Terraform is sometimes used to manage ephemeral infrastructure for development purposes, in which case you can use 'terraform destroy' to conveniently clean up all of those temporary objects once you are finished with your work.

% terraform destroy
random_password.passwords[0]: Refreshing state... [id=none]
random_password.passwords[1]: Refreshing state... [id=none]
rediscloud_subscription.rahul-test-terraform: Refreshing state... [id=1649277]

Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols:
- destroy

Terraform will perform the following actions:

# random_password.passwords[0] will be destroyed
- resource "random_password" "passwords" {
- id = "none" -> null
- length = 20 -> null
- lower = true -> null
- min_lower = 0 -> null
- min_numeric = 0 -> null
- min_special = 0 -> null
- min_upper = 0 -> null
- number = true -> null
- result = (sensitive value)
- special = false -> null
- upper = true -> null
}

# random_password.passwords[1] will be destroyed
- resource "random_password" "passwords" {
- id = "none" -> null
- length = 20 -> null
- lower = true -> null
- min_lower = 0 -> null
- min_numeric = 0 -> null
- min_special = 0 -> null
- min_upper = 0 -> null
- number = true -> null
- result = (sensitive value)
- special = false -> null
- upper = true -> null
}

# rediscloud_subscription.rahul-test-terraform will be destroyed
- resource "rediscloud_subscription" "rahul-test-terraform" {
- id = "1649277" -> null
- memory_storage = "ram" -> null
- name = "rahul-test-terraform" -> null
- payment_method_id = "XXXX" -> null
- persistent_storage_encryption = true -> null

- cloud_provider {
- cloud_account_id = "1" -> null
- provider = "AWS" -> null

- region {
- multiple_availability_zones = false -> null
- networking_deployment_cidr = "10.0.0.0/24" -> null
- networks = [
- {
- networking_deployment_cidr = "10.0.0.0/24"
- networking_subnet_id = "subnet-0055e8e3ee3ea796e"
- networking_vpc_id = ""
},
] -> null
- preferred_availability_zones = [
- "us-east-1a",
] -> null
- region = "us-east-1" -> null
}
}

- database {
# At least one attribute in this block is (or was) sensitive,
# so its contents will not be displayed.
}
}

Plan: 0 to add, 0 to change, 3 to destroy.

Do you really want to destroy all resources?
Terraform will destroy all your managed infrastructure, as shown above.
There is no undo. Only 'yes' will be accepted to confirm.

Enter a value: yes

rediscloud_subscription.rahul-test-terraform: Destroying... [id=1649277]

rediscloud_subscription.rahul-test-terraform: Destruction complete after 1m34s
random_password.passwords[0]: Destroying... [id=none]
random_password.passwords[1]: Destroying... [id=none]
random_password.passwords[0]: Destruction complete after 0s
random_password.passwords[1]: Destruction complete after 0s

Destroy complete! Resources: 3 destroyed.

Further References:

- + \ No newline at end of file diff --git a/create/azure/index.html b/create/azure/index.html index 614ca39a27..e7da1cb450 100644 --- a/create/azure/index.html +++ b/create/azure/index.html @@ -4,7 +4,7 @@ Azure Cache for Redis | The Home of Redis Developers - + @@ -12,7 +12,7 @@
- + \ No newline at end of file diff --git a/create/azure/portal/index.html b/create/azure/portal/index.html index 10a6f6381a..1d66204b67 100644 --- a/create/azure/portal/index.html +++ b/create/azure/portal/index.html @@ -4,7 +4,7 @@ Create Redis database on Azure Cache | The Home of Redis Developers - + @@ -13,7 +13,7 @@

Create Redis database on Azure Cache

Redis is an open source, in-memory, key-value data store most commonly used as a primary database, cache, message broker, and queue. Redis cache delivers sub-millisecond response times, enabling fast and powerful real-time applications in industries such as gaming, fintech, ad-tech, social media, healthcare, and IoT. Developers love Redis due to its speed, simplicity and performance.

The Azure cloud platform has more than 200 products and cloud services designed to help you bring new solutions to life, solve today's challenges, and create the future. Azure services help you build, run, and manage applications across multiple clouds, on-premises, and at the edge, with the tools and frameworks of your choice.

Azure Cache for Redis is a native fully-managed service on Microsoft Azure. Azure Cache for Redis offers both the Redis open-source (OSS Redis) and a commercial product from Redis (Redis Enterprise) as a managed service. It provides secure and dedicated Redis server instances and full Redis API compatibility. The service is operated by Microsoft, hosted on Azure, and accessible to any application within or outside of Microsoft Azure.

The Azure Cache for Redis dashboard uses Azure Monitor to provide several options for monitoring your cache instances. Use Azure Monitor to:

  • View metrics
  • Pin metrics charts to the Startboard
  • Customize the date and time range of monitoring charts
  • Add and remove metrics from the charts
  • Set alerts when certain conditions are met

Step 1. Getting Started

Search for "Azure Cache for Redis" in the search dashboard and launch the Azure Cache for Redis Enterprise service:

RedisLabs Azure Page

Step 2: Setup & Subscribe

RedisLabs Azure Page

Step 3: Configuring New Redis Cache Instance

RedisLabs Azure Page

Step 4: Connecting to Redis database

You can directly connect to the Redis cache instances using the Redis CLI command (redis-cli) as shown:

sudo redis-cli -h demos.redis.cache.windows.net -p 6379
demos.redis.cache.windows.net:6379>
tip

You can have multiple clients connected to a Redis database at the same time. The above Redis client command might require a password if you have setup authentication in your Redis configuration file. You can insert data to Redis using the SET command and then fetch it back with the GET command. You can also run the Redis INFO command to get the statistics about the health of the Redis server (for example, memory usage, Redis server load etc).
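
For example, a quick smoke test from the same redis-cli session might look like this (the key name is invented for illustration):

demos.redis.cache.windows.net:6379> SET greeting "hello"
OK
demos.redis.cache.windows.net:6379> GET greeting
"hello"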

Resources

Next Steps

Redis Launchpad
- + \ No newline at end of file diff --git a/create/azure/terraform-private-endpoint/index.html b/create/azure/terraform-private-endpoint/index.html index 3b5edf4146..deea250ea8 100644 --- a/create/azure/terraform-private-endpoint/index.html +++ b/create/azure/terraform-private-endpoint/index.html @@ -4,7 +4,7 @@ Azure Cache for Redis Enterprise using Terraform with Private Link | The Home of Redis Developers - + @@ -14,7 +14,7 @@

Azure Cache for Redis Enterprise using Terraform with Private Link

Azure Private Link for Azure Cache for Redis provides private connectivity from a virtual network to your cache instance. This means that you can use Azure Private Link to connect to an Azure Cache for Redis instance from your virtual network via a private endpoint, which is assigned a private IP address in a subnet within the virtual network. It simplifies the network architecture and secures the connection between endpoints in Azure by eliminating data exposure to the public internet. Private Link carries traffic privately, reducing your exposure to threats and helping you meet compliance standards.

Azure Resource Manager(a.k.a AzureRM) is the deployment and management service for Azure. It provides a management layer that enables you to create, update, and delete resources in your Azure account. You can use management features, like access control, locks, and tags, to secure and organize your resources after deployment. The "azurerm_redis_enterprise_cluster" is a resource that manages a Redis Enterprise cluster. This is a template to get started with the 'azurerm_redis_enterprise_cluster' resource available in the 'azurerm' provider with Terraform.

Prerequisite

  1. Terraform
  2. Azure CLI

Step 1. Getting Started

Log in to Azure using the Azure CLI:

az login

Logging in with a Service Principal will also work:

Login using an Azure Service Principal

az login --service-principal --username APP_ID --tenant TENANT_ID --password [password || /path/to/cert]

Step 2: Clone the repository

git clone https://github.com/redis-developer/acre-terraform

Step 3: Initialize the repository

cd acre-terraform
terraform init

The output should include: Terraform has been successfully initialized

Step 4: Modify the variables(optional)

The default variables are set up to deploy the smallest 'E10' instance into the 'East US' region. Changes can be made by updating the variables.tf file.

Step 5: Verify the plan

The 'plan' output will show you everything being created by the template.

terraform plan

The output should include: Plan: 18 to add, 0 to change, 0 to destroy.

Step 6: Apply the plan

When the plan looks good, 'apply' the template.

terraform apply

The output should include: Apply complete! Resources: 18 added, 0 changed, 0 destroyed.

Step 7: Connect using generated output

The access key is sensitive, so viewing the outputs must be requested specifically. The output is also in JSON format.

terraform output redisgeek_config

Example output:

{
"hostname" = "redisgeek-8jy4.eastus.redisenterprise.cache.azure.net"
"access_key" = "DQYABC3uRMXXXXXXXXXXXXXXXXTRkfgOXXXPjs82Y="
"port" = "10000"
}
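
With these values you can connect right away with redis-cli, for example (a sketch; substitute the hostname, port, and access key from your own output, and add --tls if your cache enforces TLS connections):

 redis-cli -h redisgeek-8jy4.eastus.redisenterprise.cache.azure.net -p 10000 -a <access_key>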

Resources

1. How to use Azure Cache for Redis like a Pro
2. Do More with Azure Cache for Redis, Enterprise Tiers

References

- + \ No newline at end of file diff --git a/create/azure/terraform-simple/index.html b/create/azure/terraform-simple/index.html index 56c32fe370..996fc9fd04 100644 --- a/create/azure/terraform-simple/index.html +++ b/create/azure/terraform-simple/index.html @@ -4,7 +4,7 @@ Azure Cache for Redis Enterprise using Terraform | The Home of Redis Developers - + @@ -14,7 +14,7 @@

Azure Cache for Redis Enterprise using Terraform

The Enterprise tiers of Azure Cache for Redis are generally available as a native fully managed service on Microsoft Azure. This offering combines Azure’s global presence, flexibility, security, and compliance with Redis Enterprise’s unmatched availability, performance, and extended data structure functionality to create the best experience for enterprises, with a range of Enterprise-tier features.

Azure Resource Manager(a.k.a AzureRM) is the deployment and management service for Azure. It provides a management layer that enables you to create, update, and delete resources in your Azure account. You use management features, like access control, locks, and tags, to secure and organize your resources after deployment.

The "azurerm_redis_enterprise_cluster" is a resource that manages a Redis Enterprise cluster. This is a template to get started with the 'azurerm_redis_enterprise_cluster' resource available in the 'azurerm' provider with Terraform.

Prerequisite

  1. Terraform CLI
  2. Azure CLI

Step 1. Getting Started

Log in to Azure using the Azure CLI:

az login

Step 2: Clone the repository

git clone https://github.com/redis-developer/acre-terraform-simple

Step 3: Initialize the repository

cd acre-terraform-simple
terraform init

The output should include: Terraform has been successfully initialized

Step 4: Modify the variables(optional)

The default variables are setup to deploy the smallest 'E10' instance into the 'East US' region. Changes can be made by updating the variables.tf file.

Step 5: Verify the plan

The 'plan' output will show you everything being created by the template.

terraform plan

The plan step does not make any changes in Azure

Step 6: Apply the plan

When the plan looks good, 'apply' the template.

terraform apply

Step 7: Connect using generated output

The access key is sensitive, so viewing the outputs must be requested specifically. The output is also in JSON format.

terraform output redisgeek_config

Example output:

{
"hostname" = "redisgeek-8jy4.eastus.redisenterprise.cache.azure.net"
"access_key" = "DQYABC3uRMyDguEXXXXXXXXXXWTRkfgOPjs82Y="
"port" = "10000"
}

Resources

How to use Azure Cache for Redis like a Pro
Do More with Azure Cache for Redis, Enterprise Tiers

References

- + \ No newline at end of file diff --git a/create/azurefunctions/index.html b/create/azurefunctions/index.html index 29078128a4..32acfc5b73 100644 --- a/create/azurefunctions/index.html +++ b/create/azurefunctions/index.html @@ -4,7 +4,7 @@ Getting Started with Azure Functions and Redis | The Home of Redis Developers - + @@ -14,7 +14,7 @@

Getting Started with Azure Functions and Redis


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

alt_text

Azure Functions is an event-based, serverless compute platform offered by Microsoft to accelerate and simplify serverless application development. It allows developers to write less code, build and debug locally without additional setup, and deploy and operate at scale in the cloud.

How it works

Azure Functions allows you to implement your system's logic into readily available blocks of code. These code blocks are called "functions." An Azure function's execution is triggered when an event is fired. Whenever demand for execution increases, more and more resources are allocated automatically to the service, and when requests fall, all extra resources and application instances drop off automatically. In short, as a developer, you can now focus on the pieces of code that matter most to you, and Azure Functions handles the rest.

Azure Functions provides as many or as few compute resources as needed to meet your application's demand. Providing compute resources on-demand is the essence of serverless computing in Azure Functions.

Benefits of Microsoft Azure Functions

  • Azure Functions provides automated and flexible scaling.
  • It allows you to build, debug, deploy, and monitor with integrated tools and built-in DevOps capabilities.
  • It supports a variety of programming languages such as C#, Java, JavaScript, Python, and PowerShell.
  • It allows you to use Functions extensions on Visual Studio and Visual Studio Code for faster and more efficient development on your local system.
  • With Azure Functions you can set up CI/CD with Azure Pipelines.
  • It’s a great solution for processing bulk data, integrating systems, working with IoT, and building simple APIs and microservices.
  • It’s used to break monolithic architectures into loosely coupled functions.
  • It allows you to deploy Functions to Kubernetes.

In this tutorial, you will learn how to get started with Azure Functions and Redis.

Getting started

  • Step 1. Log in to Microsoft Azure Portal
  • Step 2. Set up Azure Cache for Redis
  • Step 3. Configure Keys for Redis Cache
  • Step 4. Verify if Redis database is reachable remotely
  • Step 5. Install Homebrew on Mac
  • Step 6. Install Visual Studio Code
  • Step 7. Install the Azure Functions Core Tools
  • Step 8. Install the Azure Functions extension for Visual Studio Code
  • Step 9. Connect Azure Function to Azure account
  • Step 10. Clone the project repository
  • Step 11. Trigger the function
  • Step 12. Verify the Azure Functions app is working properly
  • Step 13. Seed the Redis database
  • Step 14. Run query using RedisInsight

Step 1. Log in to Microsoft Azure Portal

Create an Azure account with an active subscription by clicking this link: Create an account for free.

Account Dashboard

Step 2. Set up “Azure Cache for Redis”

Type "Azure Cache for Redis" in the search section and select the service:

Azure Cache for Redis Service

Under "New Redis Cache" window, create a new resource group, select your preferred location and cache type:

Creating a new Redis Instance

Once you are done with the entries, click the "Review + Create" button.

alt_text

Wait a few seconds for the deployment process to complete.

Deploying your Redis Instance

Once the deployment is complete, you will be provided with the deployment name, subscription details and resource group.

Redis Instance up and running

Step 3. Configure Keys for Redis Cache

You will need keys to log in to the Redis database. Click the "Overview" option in the left sidebar to see the Primary key, and save it for future reference.

Managing keys

Step 4. Verify if Redis database is accessible

redis-cli -h demorediss.redis.cache.windows.net -p 6379
demorediss.redis.cache.windows.net:6379> info modules
NOAUTH Authentication required.
demorediss.redis.cache.windows.net:6379> auth jsn9IdFXXXXXXXXXXXXXsAzCaDzLh6s=
OK
demorediss.redis.cache.windows.net:6379> get a1
"100"

Step 5. Install Homebrew on Mac

Install the Homebrew package manager by running this script:

/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"

Step 6. Install Visual Studio Code

Visual Studio Code is a lightweight but powerful source code editor that runs on your desktop and is available for Windows, macOS, and Linux. It comes with built-in support for JavaScript, TypeScript, and Node.js, and has a rich ecosystem of extensions for other languages (such as C++, C#, Java, Python, PHP, Go) and runtimes (such as .NET and Unity). Begin your journey with VS Code with these introductory videos.

Visual Studio Code download

Step 7. Install the Azure Functions Core Tools

brew tap azure/functions
brew install azure-functions-core-tools@4
# if upgrading on a machine that has 2.x or 3.x installed:
brew link --overwrite azure-functions-core-tools@4

Step 8. Install the Azure Functions extension for Visual Studio Code

Use the Azure Functions extension to quickly create, debug, manage, and deploy serverless apps directly from VS Code.

The Azure Functions extension for Visual Studio Code

Step 9. Connect Azure Function to Azure account

Configuring Azure in Visual Studio Code

Step 10. Clone the project repository

For this tutorial, we will be using a baby names counter app built using C#. To get started, we will first clone the repository:

git clone https://github.com/redis-developer/Baby-Names-Func


Add the “Azure Cache for Redis” endpoint URL details in the local.settings.json file as shown below:

{
  "IsEncrypted": false,
  "Values": {
    "FUNCTIONS_WORKER_RUNTIME": "dotnet",
    "redisCacheConnectionString": "demorediss.redis.cache.windows.net"
  }
}

Open the project with Visual Studio Code by running the following command:

cd Baby-Names-Func
code .

This will open VS Code, and the function project will automatically load into the Azure Functions extension.

Project loaded into Visual Studio Code

Step 11. Trigger the function

Press F5 to automatically execute the function.

Executing the function

If you want to select the repository manually, choose the .NET framework and other options, and then click “Create new project.”

Creating a new project

You will see the following output in the VS Code terminal:

     1>Done Building Project "/Users/ajeetraina/projects/Baby-Names-Func/RedisFunctions.csproj" (Clean target(s)).

Terminal will be reused by tasks, press any key to close it.

> Executing task: dotnet build /property:GenerateFullPaths=true /consoleloggerparameters:NoSummary <

Microsoft (R) Build Engine version 17.0.0+c9eb9dd64 for .NET
Copyright (C) Microsoft Corporation. All rights reserved.

Determining projects to restore...
All projects are up-to-date for restore.
RedisFunctions -> /Users/ajeetraina/projects/Baby-Names-Func/bin/Debug/net6.0/RedisFunctions.dll

Terminal will be reused by tasks, press any key to close it.

> Executing task: func host start <


Azure Functions Core Tools
Core Tools Version: 4.0.3971 Commit hash: d0775d487c93ebd49e9c1166d5c3c01f3c76eaaf (64-bit)
Function Runtime Version: 4.0.1.16815

[2022-03-01T07:51:01.383Z] Found /Users/ajeetraina/projects/Baby-Names-Func/RedisFunctions.csproj. Using for user secrets file configuration.

Functions:

CountBabyNames: [GET,POST] http://localhost:7071/api/getCount

IncrementBabyName: [GET,POST] http://localhost:7071/api/increment

For detailed output, run func with --verbose flag.


Step 12. Verify the Azure Functions app is working properly

Output in the browser from running the application locally

Step 13. Seed the Redis database

Now, let us seed the baby names data into the Redis database.

git clone https://github.com/slorello89/Seed-Baby-Names

If you connect to the Redis database and run the MONITOR command, you should see the data being inserted into the database as shown below:
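For example, reusing the endpoint and access key from Step 4 (your values will differ; the OK reply to MONITOR is followed by the live command stream):

redis-cli -h demorediss.redis.cache.windows.net -p 6379
demorediss.redis.cache.windows.net:6379> auth <your-access-key>
OK
demorediss.redis.cache.windows.net:6379> monitor
OK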

1646061655.966050 [0 122.171.48.244:60531] "CMS.INCRBY" "baby-names" "Rowen" "1"
1646061655.966050 [0 122.171.48.244:60531] "SELECT" "0"
1646061655.966050 [0 122.171.48.244:60531] "CMS.INCRBY" "baby-names" "Titus" "1"
1646061655.966050 [0 122.171.48.244:60531] "SELECT" "0"
1646061655.966050 [0 122.171.48.244:60531] "CMS.INCRBY" "baby-names" "Braxton" "1"
1646061655.966050 [0 122.171.48.244:60531] "SELECT" "0"
1646061655.966050 [0 122.171.48.244:60531] "CMS.INCRBY" "baby-names" "Alexander" "1"
1646061655.966050 [0 122.171.48.244:60531] "SELECT" "0"
1646061655.966050 [0 122.171.48.244:60531] "CMS.INCRBY" "baby-names" "Finnegan" "1"
1646061655.966050 [0 122.171.48.244:60531] "SELECT" "0"
1646061655.966050 [0 122.171.48.244:60531] "CMS.INCRBY" "baby-names" "Nasir" "1"
1646061655.966050 [0 122.171.48.244:60531] "SELECT" "0"
1646061655.966050 [0 122.171.48.244:60531] "CMS.INCRBY" "baby-names" "Fabian" "1"
1646061655.966050 [0 122.171.48.244:60531] "SELECT" "0"
1646061655.966050 [0 122.171.48.244:60531] "CMS.INCRBY" "baby-names" "Alexander" "1"
1646061655.966050 [0 122.171.48.244:60531] "SELECT" "0"
1646061655.966050 [0 122.171.48.244:60531] "CMS.INCRBY" "baby-names" "Emilio" "1"
1646061655.966050 [0 122.171.48.244:60531] "SELECT" "0"
1646061655.966050 [0 122.171.48.244:60531] "CMS.INCRBY" "baby-names" "Dax" "1"
1646061655.966050 [0 122.171.48.244:60531] "SELECT" "0"
1646061655.966050 [0 122.171.48.244:60531] "CMS.INCRBY" "baby-names" "Johnny" "1"
1646061655.966050 [0 122.171.48.244:60531] "SELECT" "0"
1646061655.966050 [0 122.171.48.244:60531] "CMS.INCRBY" "baby-names" "Mario" "1"
1646061655.966050 [0 122.171.48.244:60531] "SELECT" "0"
1646061655.966050 [0 122.171.48.244:60531] "CMS.INCRBY" "baby-names" "Lennox" "1"

Step 14. Run query using RedisInsight

Follow this link to set up RedisInsight on your local system and get connected to the Redis database. Once connected, you should be able to run the following queries:

Redis Insight

> CMS.INFO baby-names
1) width
2) (integer) 1000
3) depth
4) (integer) 10
5) count
6) (integer) 100000

> CMS.QUERY baby-names Johnny
1) (integer) 109

Additional references:

- + \ No newline at end of file diff --git a/create/cloud/aws/index.html b/create/cloud/aws/index.html index 9681f9998f..92fd43020a 100644 --- a/create/cloud/aws/index.html +++ b/create/cloud/aws/index.html @@ -4,7 +4,7 @@ Create Database using AWS Cloud | The Home of Redis Developers - + @@ -12,7 +12,7 @@

Create Database using AWS Cloud

Redis Enterprise Cloud on AWS is fully managed Redis Enterprise as a service. Designed for modern distributed applications, Redis Enterprise Cloud on AWS is known for its high performance, infinite scalability, and true high availability.

Follow the steps below to set up Redis Enterprise Cloud hosted on AWS:

Step 1. Getting Started

Follow this link to register.

AWS Cloud

Step 2. Choose AWS Cloud

For the cloud provider, select Amazon AWS and choose Free plan.

AWS Cloud

Step 3. Create database

AWS Cloud

Step 4. Click "Activate"

AWS Cloud

Next Steps

- + \ No newline at end of file diff --git a/create/cloud/azure/index.html b/create/cloud/azure/index.html index dd91bf0469..ccb98d5b4e 100644 --- a/create/cloud/azure/index.html +++ b/create/cloud/azure/index.html @@ -4,7 +4,7 @@ Create Database using Azure Cache for Redis | The Home of Redis Developers - + @@ -12,7 +12,7 @@

Create Database using Azure Cache for Redis

Azure Cache for Redis is a native fully-managed service on Microsoft Azure. Azure Cache for Redis offers both the Redis open-source (OSS Redis) and a commercial product from Redis (Redis Enterprise) as a managed service. It provides secure and dedicated Redis server instances and full Redis API compatibility. The service is operated by Microsoft, hosted on Azure, and accessible to any application within or outside of Azure.

Step 1. Getting Started

Launch Azure Cache for Redis Enterprise & Flash

RedisLabs Azure Page

Step 2: Setup & Subscribe

RedisLabs Azure Page

Step 3: Configuring New Redis Cache

RedisLabs Azure Page

Step 4: Finalising the setup

RedisLabs Azure Page

Step 5: Connecting to Redis database

sudo redis-cli -h redislabs.redis.cache.windows.net -p 6379
redislabs.redis.cache.windows.net:6379>

Next Steps

- + \ No newline at end of file diff --git a/create/cloud/gcp/index.html b/create/cloud/gcp/index.html index 0eea6c6fe3..8eb5475ef2 100644 --- a/create/cloud/gcp/index.html +++ b/create/cloud/gcp/index.html @@ -4,7 +4,7 @@ Create Database using Google Cloud | The Home of Redis Developers - + @@ -12,7 +12,7 @@

Create Database using Google Cloud

Redis Enterprise Cloud delivers fully managed Redis Enterprise as a Service. It offers all the capabilities of Redis Enterprise while taking care of all the operational aspects associated with operating Redis in the most efficient manner on Google Cloud Platform. Redis Enterprise Cloud is built on a complete serverless concept, so users don’t need to deal with nodes and clusters.

Step 1. Getting Started

Launch Redis Enterprise Cloud page on Google Cloud Platform

Google Cloud

Step 2. Click "Manage via Redis Labs"

Google Cloud

Step 3. Create Subscription

Google Cloud

Step 4. Specify the database name

Google Cloud

Step 5. Enter sizing details

Google Cloud

Step 6: Review & Create

Google Cloud

Step 7. Verify the details

Google Cloud

Step 8. Finalising the setup

Google Cloud

Next Steps

- + \ No newline at end of file diff --git a/create/cloud/images/index.html b/create/cloud/images/index.html index 5667aa6b6f..d0e523c454 100644 --- a/create/cloud/images/index.html +++ b/create/cloud/images/index.html @@ -4,7 +4,7 @@ images | The Home of Redis Developers - + @@ -12,7 +12,7 @@
- + \ No newline at end of file diff --git a/create/cloud/index.html b/create/cloud/index.html index 411e9e218c..a21c80ed0c 100644 --- a/create/cloud/index.html +++ b/create/cloud/index.html @@ -4,7 +4,7 @@ Create Database using Cloud | The Home of Redis Developers - + @@ -12,7 +12,7 @@
- + \ No newline at end of file diff --git a/create/cloud/rediscloud/images/index.html b/create/cloud/rediscloud/images/index.html index 8f677f2e14..d084888b53 100644 --- a/create/cloud/rediscloud/images/index.html +++ b/create/cloud/rediscloud/images/index.html @@ -4,7 +4,7 @@ images | The Home of Redis Developers - + @@ -12,7 +12,7 @@
- + \ No newline at end of file diff --git a/create/cloud/rediscloud/index.html b/create/cloud/rediscloud/index.html index 799f7c81bb..726c33834b 100644 --- a/create/cloud/rediscloud/index.html +++ b/create/cloud/rediscloud/index.html @@ -4,7 +4,7 @@ Create Database using Redis Enterprise Cloud | The Home of Redis Developers - + @@ -12,7 +12,7 @@

Create Database using Redis Enterprise Cloud

Redis Enterprise Cloud is a fully managed cloud service by Redis. Built for modern distributed applications, Redis Enterprise Cloud enables you to run any query, simple or complex, at sub-millisecond performance at virtually infinite scale without worrying about operational complexity or service availability. With modern probabilistic data structures and extensible data models, including Search, JSON, Graph, and Time Series, you can rely on Redis as your data-platform for all your real-time needs.

Step 1. Create free cloud account

Create your free Redis Enterprise Cloud account. Once you click on “Get Started”, you will receive an email with a link to activate your account and complete your signup process.

My Image

image

Step 2. Create Your subscription

Next, you will have to create a Redis Enterprise Cloud subscription. In the Redis Enterprise Cloud menu, click "Create your Subscription".

My Image

Step 3. Select the right Subscription Plan

Select "Fixed Plan" for low throughout application as for now.

My Image

Step 4. Select cloud vendor

For the cloud provider, select your preferred cloud (for demo purposes).

My Image

Step 5. Click "Create Subscription"

Finally, click on "Create Subscription" button.

My Image

You can now verify the subscription as shown below:

My Image

Step 6. Create database

Click "Create Database". Enter database name and your preferred module.

My Image

Step 7. Launch database

Click "Activate" and wait for few seconds till it gets activated. Once fully activated, you will see the database endpoints as shown below:

My Image

Next Steps

Redis Launchpad
- + \ No newline at end of file diff --git a/create/cloudrun/index.html b/create/cloudrun/index.html index fd9133d09c..481ad14adf 100644 --- a/create/cloudrun/index.html +++ b/create/cloudrun/index.html @@ -4,7 +4,7 @@ Getting Started with Google Cloud Run and Redis | The Home of Redis Developers - + @@ -12,7 +12,7 @@

Getting Started with Google Cloud Run and Redis


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

alt_text

If you’re looking for a solution that lets you go from container to URL within seconds, check out Google Cloud Run. Cloud Run is Google’s fully managed compute platform for running stateless, HTTP-driven containers. By using a single command (“gcloud run deploy”) you can convert a container image to a fully managed web application and run it in a production environment with auto-scalability, high availability, and security.

Google Cloud Run allows you to deploy and scale serverless HTTP containers without worrying about provisioning servers, scaling them up and down to meet demand, or overpaying by consuming more resources than necessary. It makes container deployment much easier and is well suited to building cloud applications such as web apps, APIs, background jobs, etc.

Cloud Run is powered by and built on Knative, an open source community project which adds components for deploying, running, and managing serverless, cloud-native applications on Kubernetes. It allows you to easily run your containers either in your Google Kubernetes Engine (GKE) cluster (https://cloud.google.com/kubernetes-engine) with Cloud Run on GKE, or fully managed with Cloud Run. Cloud Run helps developers focus on writing high-value code, regardless of where their organizations are on the path to the cloud.

Compelling features of Google Cloud Run

In this tutorial, you will learn how to deploy a simple Redis rate limiting application to Google Cloud Run in just 5 minutes.

Table of Contents

  • Step 1. Set up a free Redis Enterprise Cloud account
  • Step 2. Install Google Cloud CLI
  • Step 3. Authenticate your GCP account
  • Step 4. Enable Google Services
  • Step 5. Deploy to Google Cloud Run from source
  • Step 6. Verify if service is listed
  • Step 7. Set up environment variables
  • Step 8. Access your app

Step 1. Set up a free Redis Enterprise Cloud account

Visit developer.redis.com/create/rediscloud/ and create a free Redis Enterprise Cloud account. Once you complete the signup tutorial, you will be provided with the database endpoint URL, port and password. Save these for future reference.

tip

For a limited time, use TIGER200 to get $200 credits on Redis Enterprise Cloud and try all the advanced capabilities!

🎉 Click here to sign up

Step 2. Install Google Cloud CLI

Run the following command to install Google Cloud CLI Core Libraries and dependencies:

brew install --cask google-cloud-sdk

Step 3. Authenticate your GCP account

To deploy your app you must first download, install, and initialize the gcloud CLI.

Download and install gcloud SDK via https://cloud.google.com/sdk/docs

gcloud auth login

Allow Google Cloud SDK access to your Google Account:

Authenticating with Google Cloud

Step 4. Enable Google Services

gcloud services enable \
artifactregistry.googleapis.com \
cloudbuild.googleapis.com \
run.googleapis.com
Operation "operations/acat.p2-406459833831-88327c08-1fe9-4d9a-a6b9-db8c8b007863" finished successfully.


Step 5. Deploy to Google Cloud Run from source

Deploying from source automatically builds a container image from source code and deploys it.

gcloud run deploy
Deploying from source. To deploy a container use [--image]. See https://cloud.google.com/run/docs/deploying-source-code for more details.
Source code location (/Users/ajeetraina/projects/googlecloud/basic-rate-limiting-demo-python/google-cloud-run):
Next time, use `gcloud run deploy --source .` to deploy the current directory.

Service name (google-cloud-run):
Please specify a region:
[1] asia-east1
[2] asia-east2
[3] asia-northeast1
[4] asia-northeast2
[5] asia-northeast3
[6] asia-south1
[7] asia-south2
[8] asia-southeast1
[9] asia-southeast2
[10] australia-southeast1
[11] australia-southeast2
[12] europe-central2
[13] europe-north1
[14] europe-west1
[15] europe-west2
[16] europe-west3
[17] europe-west4
[18] europe-west6
[19] northamerica-northeast1
[20] northamerica-northeast2
[21] southamerica-east1
[22] southamerica-west1
[23] us-central1
[24] us-east1
[25] us-east4
[26] us-west1
[27] us-west2
[28] us-west3
[29] us-west4
[30] cancel
Please enter your numeric choice: 1

To make this the default region, run `gcloud config set run/region asia-east1`.

Deploying from source requires an Artifact Registry Docker repository to store built containers. A repository named
[cloud-run-source-deploy] in region [asia-east1] will be created.

Do you want to continue (Y/n)? Y

This command is equivalent to running `gcloud builds submit --tag [IMAGE] /Users/ajeetraina/projects/googlecloud/basic-rate-limiting-demo-python/google-cloud-run` and `gcloud run deploy google-cloud-run --image [IMAGE]`

Allow unauthenticated invocations to [google-cloud-run] (y/N)? y

Building using Dockerfile and deploying container to Cloud Run service [google-cloud-run] in project [redislabs-marketing-project] region [asia-east1]
⠼ Building and deploying new service... Uploading sources.
✓ Creating Container Repository...
✓ Uploading sources...
. Building Container...
. Creating Revision...
. Routing traffic...
. Setting IAM Policy…

Please enter your numeric choice: 1

To make this the default region, run `gcloud config set run/region asia-east1`.

Allow unauthenticated invocations to [django-redis-rate-limiting-example] (y/N)? y

Deploying container to Cloud Run service [django-redis-rate-limiting-example] in project [redislabs-marketing-project] region [asia-east1]
✓ Deploying new service... Done.
✓ Creating Revision... Revision deployment finished. Waiting for health check to begin.
✓ Routing traffic...
✓ Setting IAM Policy...
Done.
Service [django-redis-rate-limiting-example] revision [django-redis-rate-limiting-example-00001-mog] has been deployed and is serving 100 percent of traffic.
Service URL: https://django-redis-rate-limiting-example-opcboau66a-de.a.run.app


Step 6. Verify if the service is listed under Cloud Run dashboard

Verifying if the service is listed

Verifying if the service is listed

Verifying the service via terminal UI

$ gcloud run services describe django-redis-rate-limiting-example

✔ Service django-redis-rate-limiting-example in region asia-east1

URL: https://django-redis-rate-limiting-example-opcboau66a-de.a.run.app
Ingress: all
Traffic:
100% LATEST (currently django-redis-rate-limiting-example-00001-mog)

Last updated on 2022-02-19T02:50:03.100357Z by ajeet.raina@redis.com:
Revision django-redis-rate-limiting-example-00001-mog
Image: gcr.io/redislabs-marketing-project/django-redis-rate-limiting-example@sha256:95bf8d5705c4ab52d2c9ba07d7dcf0c651431d8d5d38ad8806487aaa8a8a870d at 95bf8d57...
Port: 8080
Memory: 512Mi
CPU: 1000m
Service account: 406459833831-compute@developer.gserviceaccount.com
Concurrency: 80
Max Instances: 100
Timeout: 300s

Step 7. Set up environment variables

  • Adding the Redis URL to point to Redis Enterprise Cloud (a CLI alternative is sketched after this list)

Setting up environment variables

  • Enable HTTPS

Enabling HTTPS

  • Ensuring the VPC connector shows as “none”

Configuring the VPC connector

  • Allowing all traffic

Allowing all traffic
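The same settings can also be applied from the CLI instead of the console; a sketch, assuming the app reads a REDIS_URL environment variable (substitute your own Redis Enterprise Cloud endpoint, port, and password):

gcloud run services update django-redis-rate-limiting-example \
  --region asia-east1 \
  --update-env-vars REDIS_URL=redis://:<password>@<endpoint>:<port>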

Step 8. Access your app

Wait for the build and deploy to complete. When finished, a message with a unique URL will be displayed.

note

The URL shown in your case will be different from the one shown below.

Open https://django-redis-rate-limiting-example-opcboau66a-de.a.run.app/ and access your app.

Running the application

This app allows you to choose the desired number of requests allowed in each 10-second period. The app blocks connections from a client after it surpasses a certain number of requests (default: 10) in the time window (default: 10 seconds), and lets the user know how many requests they have remaining before they run over the limit. Once the limit is exceeded, the server should return an HTTP status code of 429 (“Too Many Requests”).
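A quick way to observe this behaviour from the terminal (assuming the default limit of 10 requests per 10-second window; substitute your own service URL, and note that the exact rate-limited route depends on the app):

for i in $(seq 1 11); do
  curl -s -o /dev/null -w "%{http_code}\n" https://django-redis-rate-limiting-example-opcboau66a-de.a.run.app/
done

If the root route is rate limited, the first ten responses should print 200 and the eleventh 429.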

Additional references:

- + \ No newline at end of file diff --git a/create/docker/index.html b/create/docker/index.html index f8cdd9ae32..8cb7dadc8b 100644 --- a/create/docker/index.html +++ b/create/docker/index.html @@ -4,7 +4,7 @@ Overview | The Home of Redis Developers - + @@ -12,7 +12,7 @@ - + \ No newline at end of file diff --git a/create/docker/nodejs-nginx-redis/index.html b/create/docker/nodejs-nginx-redis/index.html index ab180fbde4..75379f87f9 100644 --- a/create/docker/nodejs-nginx-redis/index.html +++ b/create/docker/nodejs-nginx-redis/index.html @@ -4,7 +4,7 @@ How to build and run a Node.js application using Nginx, Docker and Redis | The Home of Redis Developers - + @@ -13,7 +13,7 @@

How to build and run a Node.js application using Nginx, Docker and Redis


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

Thanks to Node.js, millions of frontend developers who write JavaScript for the browser are now able to write server-side code in addition to client-side code, without needing to learn a completely different language. Node.js is a free, open-source, cross-platform JavaScript runtime environment. It is capable of handling thousands of concurrent connections with a single server without introducing the burden of managing thread concurrency, which could be a significant source of bugs.

Nginx-node

In this quickstart guide, you will see how to build a Node.js application (visitor counter) using Nginx, Redis and Docker.

What do you need?

  • Node.js: An open-source, cross-platform, back-end JavaScript runtime environment that runs on the V8 engine and executes JavaScript code outside a web browser.
  • Nginx: An open source software for web serving, reverse proxying, caching, load balancing, media streaming, and more.
  • Docker: a containerization platform for developing, shipping, and running applications.
  • Docker Compose: A tool for defining and running multi-container Docker applications.

Project structure

.
├── docker-compose.yml
├── redis
├── nginx
│   ├── Dockerfile
│   └── nginx.conf
├── web1
│   ├── Dockerfile
│   ├── package.json
│   └── server.js
└── web2
    ├── Dockerfile
    ├── package.json
    └── server.js

Prerequisite:

– Install Docker Desktop

Visit https://docs.docker.com/desktop/mac/install/ to setup Docker Desktop for Mac or Windows on your local system.

Image1

info

Docker Desktop comes with Docker compose installed by default, hence you don't need to install it separately.

Step 1. Create a Docker compose file

Create an empty file with the below content and save it by name - "docker-compose.yml"

version: '3.9'
services:
  redis:
    image: 'redis:alpine'
    ports:
      - '6379:6379'
  web1:
    restart: on-failure
    build: ./web1
    ports:
      - '81:5000'
  web2:
    restart: on-failure
    build: ./web2
    ports:
      - '82:5000'
  nginx:
    build: ./nginx
    ports:
      - '80:80'
    depends_on:
      - web1
      - web2

The compose file defines an application with four services: redis, web1, web2 and nginx. When deploying the application, docker-compose maps port 80 of the nginx service container to port 80 of the host, as specified in the file.
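Before deploying, you can ask docker-compose to validate the file and print the resolved configuration:

docker-compose config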

info

By default, Redis runs on port 6379. Make sure you don't run another instance of Redis on your system or port 6379 on the host is not being used by another container, otherwise the port should be changed.

Step 2. Create an nginx directory and add the below files:

File: nginx/nginx.conf

upstream loadbalancer {
  server web1:5000;
  server web2:5000;
}

server {
  listen 80;
  server_name localhost;
  location / {
    proxy_pass http://loadbalancer;
  }
}

File: Dockerfile

FROM nginx
RUN rm /etc/nginx/conf.d/default.conf
COPY nginx.conf /etc/nginx/conf.d/default.conf

Step 3. Create a web directory and add the below files:

File: web/Dockerfile

FROM node:alpine

WORKDIR /usr/src/app

COPY ./package.json ./
RUN npm install
COPY ./server.js ./

CMD ["npm","start"]

File: web/package.json


"name": "web",
"version": "1.0.0",
"description": "Running Node.js and Express.js on Docker",
"main": "server.js",
"scripts": {
"start": "node server.js"
},
"dependencies": {
"express": "^4.17.2",
"redis": "3.1.2"
},
"author": "",
"license": "MIT"
}

File: web/server.js

const express = require('express');
const redis = require('redis');
const app = express();

// node-redis 3.x client; 'redis' resolves to the redis service in docker-compose
const redisClient = redis.createClient({
  host: 'redis',
  port: 6379,
});

app.get('/', function (req, res) {
  // Read the current visit counter, then write back the incremented value
  redisClient.get('numVisits', function (err, numVisits) {
    let numVisitsToDisplay = parseInt(numVisits) + 1;
    if (isNaN(numVisitsToDisplay)) {
      numVisitsToDisplay = 1; // first visit: the key does not exist yet
    }
    res.send('Number of visits is: ' + numVisitsToDisplay);
    redisClient.set('numVisits', numVisitsToDisplay);
  });
});

app.listen(5000, function () {
  console.log('Web application is listening on port 5000');
});

Step 4. Creating a web1 directory and add the below files:

File: Dockerfile

FROM node:alpine

WORKDIR /usr/src/app

COPY ./package*.json ./
RUN npm install
COPY ./server.js ./

CMD ["npm","start"]

File: server.js

const express = require('express');
const redis = require('redis');
const app = express();

// node-redis 3.x client; 'redis' resolves to the redis service in docker-compose
const redisClient = redis.createClient({
  host: 'redis',
  port: 6379,
});

app.get('/', function (req, res) {
  // Read the current visit counter, then write back the incremented value
  redisClient.get('numVisits', function (err, numVisits) {
    let numVisitsToDisplay = parseInt(numVisits) + 1;
    if (isNaN(numVisitsToDisplay)) {
      numVisitsToDisplay = 1; // first visit: the key does not exist yet
    }
    res.send('web1: Total number of visits is: ' + numVisitsToDisplay);
    redisClient.set('numVisits', numVisitsToDisplay);
  });
});

app.listen(5000, function () {
  console.log('Web app is listening on port 5000');
});

File: package.json

{
  "name": "web1",
  "version": "1.0.0",
  "description": "Running Node.js and Express.js on Docker",
  "main": "server.js",
  "scripts": {
    "start": "node server.js"
  },
  "dependencies": {
    "express": "^4.17.2",
    "redis": "3.1.2"
  },
  "author": "",
  "license": "MIT"
}
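The web2 directory mirrors web1: the same Dockerfile, an identical package.json (with "name": "web2"), and a server.js that differs only in the response prefix, e.g.:

res.send('web2: Total number of visits is: ' + numVisitsToDisplay);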

Step 5. Deploy the application

Let us deploy the full-fledged app using docker-compose

$ docker-compose up -d
Creating nginx-nodejs-redis_redis_1 ... done
Creating nginx-nodejs-redis_web1_1 ... done
Creating nginx-nodejs-redis_web2_1 ... done
Creating nginx-nodejs-redis_nginx_1 ... done

Expected result

Listing containers must show four containers running and the port mapping as below:

docker-compose ps
           Name                          Command               State          Ports
------------------------------------------------------------------------------------------
nginx-nodejs-redis_nginx_1   /docker-entrypoint.sh ngin ...   Up      0.0.0.0:80->80/tcp
nginx-nodejs-redis_redis_1   docker-entrypoint.sh redis ...   Up      0.0.0.0:6379->6379/tcp
nginx-nodejs-redis_web1_1    docker-entrypoint.sh npm start   Up      0.0.0.0:81->5000/tcp
nginx-nodejs-redis_web2_1    docker-entrypoint.sh npm start   Up      0.0.0.0:82->5000/tcp

Step 6. Testing the app

After the application starts, navigate to http://localhost:80 in your web browser or run:

$ curl localhost:80
web1: Total number of visits is: 1
$ curl localhost:80
web1: Total number of visits is: 2
$ curl localhost:80
web2: Total number of visits is: 3
$ curl localhost:80
web2: Total number of visits is: 4

Nginx distributes the requests between web1 and web2 (round-robin by default), while both instances share the same visit counter stored in Redis.

Step 7. Monitoring Redis keys

If you want to monitor the Redis keys, you can use the MONITOR command. Install the Redis client on your Mac system using brew install redis, then connect directly to the Redis container (published on port 6379) by issuing the command below:

% redis-cli
127.0.0.1:6379> monitor
OK
1646485507.290868 [0 172.24.0.2:34330] "get" "numVisits"
1646485507.309070 [0 172.24.0.2:34330] "set" "numVisits" "5"
1646485509.228084 [0 172.24.0.2:34330] "get" "numVisits"
1646485509.241762 [0 172.24.0.2:34330] "set" "numVisits" "6"
1646485509.619369 [0 172.24.0.4:52082] "get" "numVisits"
1646485509.629739 [0 172.24.0.4:52082] "set" "numVisits" "7"
1646485509.990926 [0 172.24.0.2:34330] "get" "numVisits"
1646485509.999947 [0 172.24.0.2:34330] "set" "numVisits" "8"
1646485510.270934 [0 172.24.0.4:52082] "get" "numVisits"
1646485510.286785 [0 172.24.0.4:52082] "set" "numVisits" "9"
1646485510.469613 [0 172.24.0.2:34330] "get" "numVisits"
1646485510.480849 [0 172.24.0.2:34330] "set" "numVisits" "10"
1646485510.622615 [0 172.24.0.4:52082] "get" "numVisits"
1646485510.632720 [0 172.24.0.4:52082] "set" "numVisits" "11"

Further References

- + \ No newline at end of file diff --git a/create/docker/redis-on-docker/images/index.html b/create/docker/redis-on-docker/images/index.html index 82215e24da..94aa96746a 100644 --- a/create/docker/redis-on-docker/images/index.html +++ b/create/docker/redis-on-docker/images/index.html @@ -4,7 +4,7 @@ List of Images | The Home of Redis Developers - + @@ -12,7 +12,7 @@
- + \ No newline at end of file diff --git a/create/docker/redis-on-docker/index.html b/create/docker/redis-on-docker/index.html index 688b33b87a..aba3e177b5 100644 --- a/create/docker/redis-on-docker/index.html +++ b/create/docker/redis-on-docker/index.html @@ -4,7 +4,7 @@ How to Deploy and Run Redis in a Docker container | The Home of Redis Developers - + @@ -14,7 +14,7 @@

How to Deploy and Run Redis in a Docker container


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

Pre-requisite

Ensure that Docker is installed in your system.

If you're new to Docker, refer to https://docs.docker.com/docker-for-mac/install/ to install Docker on Mac.

To pull and start the Redis Enterprise Software Docker container, run this docker run command in the terminal or command-line for your operating system.

note

On Windows, make sure Docker is configured to run Linux-based containers.

docker run -d --cap-add sys_resource --name rp -p 8443:8443 -p 9443:9443 -p 12000:12000 redislabs/redis

In the web browser on the host machine, go to https://localhost:8443 to see the Redis Enterprise Software web console.

Step 1: Click on “Setup”

Click Setup to start the node configuration steps.

My Image

Step 2: Enter your preferred FQDN

In the Node Configuration settings, enter a cluster FQDN such as demo.redis.com, then click the Next button.

My Image

Enter your license key, if you have one. If not, click the Next button to use the trial version.

Step 3: Enter the admin credentials

Enter an email and password for the admin account for the web console.

My Image

These credentials are also used for connections to the REST API. Click OK to confirm that you are aware of the replacement of the HTTPS SSL/TLS certificate on the node, and proceed through the browser warning.

Step 4: Create a Database:

Select “redis database” and the “single region” deployment, and click Next.

My Image

Enter a database name such as demodb and click Activate to create your database.

My Image

You now have a Redis database!

Step 5: Connecting using redis-cli

After you create the Redis database, you are ready to store data in it. redis-cli is a simple built-in command-line tool for interacting with a Redis database. Run redis-cli, located in the /opt/redislabs/bin directory, to connect to port 12000 and store and retrieve a key in the database:

$ docker exec -it rp bash
redislabs@fd8dca50f905:/opt$
/opt/redislabs/bin/redis-cli -p 12000
127.0.0.1:12000> auth <enter password>
OK
127.0.0.1:12000> set key1 123
OK
127.0.0.1:12000> get key1
"123"

Next Steps

Redis Launchpad
- + \ No newline at end of file diff --git a/create/from-source/index.html b/create/from-source/index.html index c12705fdc2..5453f4e16e 100644 --- a/create/from-source/index.html +++ b/create/from-source/index.html @@ -4,7 +4,7 @@ Create Redis database from Source | The Home of Redis Developers - + @@ -14,7 +14,7 @@

Create Redis database from Source


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

Step 1: Download, extract and compile Redis

Redis stands for REmote DIctionary Server. Redis is an open source, in-memory, key-value data store most commonly used as a primary database, cache, message broker, and queue. Redis cache delivers sub-millisecond response times, enabling fast and powerful real-time applications in industries such as gaming, fintech, ad-tech, social media, healthcare, and IoT.

In order to install Redis from source, you first need to download the latest Redis source code. The Redis source code is available to download here. You can verify the integrity of these downloads by checking them against the digests in the redis-hashes git repository.

wget https://download.redis.io/redis-stable.tar.gz
tar xvzf redis-stable.tar.gz
cd redis-stable
make
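Optionally, you can run the bundled test suite to verify the build (this requires Tcl to be installed):

make test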

It is a good idea to copy both the Redis server and the command line interface into the proper places, either manually using the following commands:

sudo cp src/redis-server /usr/local/bin/
sudo cp src/redis-cli /usr/local/bin/

Or just using sudo make install.

The binaries that are now compiled are available in the src directory.

Step 2: Running Redis Server

Start the Redis server by running the following command:

$ redis-server
note

You don't need to restart the Redis service.

Step 3: Interacting with Redis Client

Once the Redis installation has completed, you can use the Redis client to connect to the Redis server. Use the following commands to store and retrieve a string:

$ src/redis-cli
redis> set foo bar
OK
redis> get foo
"bar"

redis.conf is the Redis configuration file, used to configure the behavior of the Redis Server. For more information on the available configuration options, check out the documentation on redis.io.

Next Steps

- + \ No newline at end of file diff --git a/create/gcp/index.html b/create/gcp/index.html index 6b3a2a2fbb..901ebb56d4 100644 --- a/create/gcp/index.html +++ b/create/gcp/index.html @@ -4,7 +4,7 @@ Create Redis database using Google Cloud | The Home of Redis Developers - + @@ -12,7 +12,7 @@

Create Redis database using Google Cloud


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

Redis Enterprise Cloud delivers fully managed Redis Enterprise as a Service. It offers all the capabilities of Redis Enterprise while taking care of all the operational aspects associated with operating Redis in the most efficient manner on Google Cloud Platform. Redis Enterprise Cloud is built on a complete serverless concept, so users don’t need to deal with nodes and clusters.

Step 1. Getting Started

Launch Redis Enterprise Cloud page on Google Cloud Platform

Google Cloud

Step 2. Click "Manage via Redis Labs"

Google Cloud

Step 3. Create Subscription

Google Cloud

Step 4. Specify the database name

Google Cloud

Step 5. Enter sizing details

Google Cloud

Step 6: Review & Create

Google Cloud

Step 7. Verify the details

Google Cloud

Step 8. Finalising the setup

Google Cloud

Next Steps

Redis Launchpad
- + \ No newline at end of file diff --git a/create/heroku/herokugo/index.html b/create/heroku/herokugo/index.html index 3e91758363..e16a5d5c76 100644 --- a/create/heroku/herokugo/index.html +++ b/create/heroku/herokugo/index.html @@ -4,7 +4,7 @@ Deploy a Go app on Heroku using Redis | The Home of Redis Developers - + @@ -20,7 +20,7 @@ User data is stored in a hash where each user entry contains the next values:

  • username: unique user name;

  • password: hashed password

  • Additionally a set of rooms is associated with user

  • Rooms are sorted sets which contains messages where score is the timestamp for each message

  • Each room has a name associated with it

  • The "online" set is global for all users is used for keeping track on which user is online.

  • User hashes are accessed by the key user:{userId}. The data is stored with HSET key field data. The user id is calculated by incrementing the total_users key (INCR total_users)

  • Usernames are stored as separate keys (username:{username}) which returns the userId for quicker access and stored with SET username:{username} {userId}.

  • Rooms that a user belongs to are stored at user:{userId}:rooms as a set of room ids. A room is added by the SADD user:{userId}:rooms {roomId} command.

  • Messages are stored at room:{roomId} key in a sorted set (as mentioned above). They are added with the ZADD room:{roomId} {timestamp} {message} command. The message is serialized to an app-specific JSON string.
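Putting the bullets above together, creating a user and posting a message looks roughly like this at the redis-cli prompt (illustrative values; suppose INCR total_users returns 2 for the new user id):

INCR total_users
HSET user:2 username "alice" password "<hashed-password>"
SET username:alice 2
SADD user:2:rooms 1
ZADD room:1 1615480369 "{\"from\":2,\"message\":\"Hello\"}"
SADD online_users 2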

How is the data accessed?

  • Get a user: HGETALL user:{id}. Example:

 HGETALL user:2

This gets the data for the user with id 2.

  • Online users: SMEMBERS online_users. This will return ids of users who are online

  • Get the ids of rooms that a user is in: SMEMBERS user:{id}:rooms. Example:

 SMEMBERS user:2:rooms

This will return IDs of rooms for the user whose ID is 2

  • Get a list of messages: ZREVRANGE room:{roomId} {offset_start} {offset_end}. Example:
 ZREVRANGE room:1:2 0 50

This returns the messages at ranks 0 through 50 (newest first) for the private room between users with IDs 1 and 2.

Next Steps

- + \ No newline at end of file diff --git a/create/heroku/herokujava/index.html b/create/heroku/herokujava/index.html index 50dbdf3428..f87d3b26f5 100644 --- a/create/heroku/herokujava/index.html +++ b/create/heroku/herokujava/index.html @@ -4,7 +4,7 @@ Deploy Java app on Heroku using Redis | The Home of Redis Developers - + @@ -14,7 +14,7 @@

Deploy Java app on Heroku using Redis


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

Heroku is a cloud service provider and software development platform which facilitates fast and effective building, deploying and scaling of web applications. It offers you a ready-to-use environment that allows you to deploy your code fast.

Some of the notable benefits of Heroku include:

  • Users can get started with the free tier of Heroku
  • Let developers concentrate on coding and not server management
  • Integrates with familiar developer workflows
  • Enhance the productivity of cloud app development teams
  • Helps your development, QA, and business stakeholders create a unified dashboard
  • Support for Modern Open Source Languages

Step 1. Create Redis Enterprise Cloud

Create your free Redis Enterprise Cloud account by visiting this link

tip

For a limited time, use TIGER200 to get $200 credits on Redis Enterprise Cloud and try all the advanced capabilities!

🎉 Click here to sign up

recloud

Follow this link to create a Redis Enterprise Cloud subscription and database. Once you create the database, you will be provisioned with a unique database endpoint URL, port and password. Save these for future reference.

Step 2. Create a Heroku account

If you are using Heroku for the first time, create your new Heroku account through this link

heroku

Step 3. Install Heroku CLI on your system

 brew install heroku

Step 4. Login to Heroku

 heroku login
heroku: Press any key to open up the browser to login or q to exit:
Opening browser to https://cli-auth.heroku.com/auth/cli/browser/XXXXXXXXXXA
Logging in... done
Logged in as your_email_address

Step 5. Connect your application to Redis Enterprise Cloud

For this demonstration, we will be using a Sample Rate Limiting application.

Clone the repository and create the Heroku app from inside it:

 git clone https://github.com/redis-developer/basic-rate-limiting-demo-java
 cd basic-rate-limiting-demo-java
 heroku create
Creating app... done, ⬢ hidden-woodland-03996
https://hidden-woodland-03996.herokuapp.com/ | https://git.heroku.com/hidden-woodland-03996.git

Step 6. Setting up Environment Variables

Go to Heroku dashboard, click "Settings" and set REDIS_ENDPOINT_URI and REDIS_PASSWORD under the Config Vars. Refer to Step 1 for the correct values to use.

heroku
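Alternatively, the same config vars can be set from the Heroku CLI (a sketch; substitute the endpoint and password from Step 1 and your own app name from Step 5):

heroku config:set REDIS_ENDPOINT_URI="<endpoint>:<port>" REDIS_PASSWORD="<password>" -a hidden-woodland-03996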

You now have a functioning Git repository that contains a simple Java application, ready to deploy.

Step 7. Deploy your code

Heroku generates a random name (in this case hidden-woodland-03996) for your app, or you can pass a parameter to specify your own app name. Now deploy your code:

$ git push heroku
remote: BUILD SUCCESSFUL in 1m 5s
remote: 12 actionable tasks: 12 executed
remote: -----> Discovering process types
remote: Procfile declares types -> web
remote:
remote: -----> Compressing...
remote: Done: 298.9M
remote: -----> Launching...
remote: Released v3
remote: https://hidden-woodland-03996.herokuapp.com/ deployed to Heroku
remote:
remote: Verifying deploy... done.
To https://git.heroku.com/hidden-woodland-03996.git
* [new branch] master -> master

Step 8. Accessing the application

Open https://hidden-woodland-03996.herokuapp.com/ to see your application

heroku

Next Steps

- + \ No newline at end of file diff --git a/create/heroku/herokunodejs/index.html b/create/heroku/herokunodejs/index.html index 458ceb5631..1463ebe9b8 100644 --- a/create/heroku/herokunodejs/index.html +++ b/create/heroku/herokunodejs/index.html @@ -4,7 +4,7 @@ Deploy a NodeJS app on Heroku using Redis | The Home of Redis Developers - + @@ -13,7 +13,7 @@

Deploy a NodeJS app on Heroku using Redis


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

Heroku is a platform as a service (PaaS) that enables developers to build, run, and operate applications entirely in the cloud. It is a platform for data as well as apps, providing a secure, scalable database-as-a-service with developer tools like database followers, forking, dataclips, and automated health checks. Heroku is widely popular as it makes the processes of deploying, configuring, scaling, tuning, and managing apps as simple and straightforward as possible, so that developers can focus on what’s most important: building great apps that delight and engage customers.

Step 1. Create Redis Enterprise Cloud

Create your free Redis Enterprise Cloud account by visiting this link

tip

For a limited time, use TIGER200 to get $200 credits on Redis Enterprise Cloud and try all the advanced capabilities!

🎉 Click here to sign up

recloud

Follow this link to create a Redis Enterprise Cloud subscription and database. Once you create the database, you will be provisioned with a unique database endpoint URL, port and password. Save these for future reference.

Step 2. Create a Heroku account

If you are using Heroku for the first time, create your new Heroku account through this link

heroku

Step 3. Install Heroku CLI on your system

 brew install heroku

Step 4. Login to Heroku

 heroku login
heroku: Press any key to open up the browser to login or q to exit:
Opening browser to https://cli-auth.heroku.com/auth/cli/browser/XXXXXXXXXXA
Logging in... done
Logged in as your_email_address

Step 5. Connect your application to Redis Enterprise Cloud

For this demonstration, we will be using a Sample Rate Limiting application

Clone the repository

 git clone https://github.com/redis-developer/basic-redis-rate-limiting-demo-nodejs

Run the commands below from inside the cloned directory to get a functioning Git repository that contains a simple application as well as a package.json file.

heroku create
Creating app... done, ⬢ rocky-lowlands-06306
https://rocky-lowlands-06306.herokuapp.com/ | https://git.heroku.com/rocky-lowlands-06306.git

Step 6. Setting up environment variables

Go to the Heroku dashboard, click "Settings" and set REDIS_ENDPOINT_URI and REDIS_PASSWORD under the Config Vars. Refer to Step 1 for the correct values to use.

heroku

You now have a functioning Git repository that contains a simple application as well as a package.json file, which is used by Node’s dependency manager.

Step 7. Deploy your code

$ git push heroku

Wait for few seconds and you will see the messages below:

remote: -----> Launching...
remote: Released v3
remote: https://rocky-lowlands-06306.herokuapp.com/ deployed to Heroku
remote:
remote: Verifying deploy... done.
To https://git.heroku.com/rocky-lowlands-06306.git
* [new branch] main -> main

Step 8. Accessing the application

Open https://rocky-lowlands-06306.herokuapp.com/ to see your application

heroku

Next Steps

- + \ No newline at end of file diff --git a/create/heroku/herokupython/index.html b/create/heroku/herokupython/index.html index f39d9d43b5..222091b73e 100644 --- a/create/heroku/herokupython/index.html +++ b/create/heroku/herokupython/index.html @@ -4,7 +4,7 @@ Deploy a Python app on Heroku using Redis | The Home of Redis Developers - + @@ -15,7 +15,7 @@ You can refer to Heroku documentation

heroku

Step 3. Install Heroku CLI on your system

Run the following command to install the Heroku CLI on your system.

 brew install heroku

Step 4. Login to Heroku

 heroku login
heroku: Press any key to open up the browser to login or q to exit:
Opening browser to https://cli-auth.heroku.com/auth/cli/browser/XXXXXXXXXXA
Logging in... done
Logged in as your_email_address

Step 5. Connect your application to Redis Enterprise Cloud

For this demonstration, we will be using a Sample Rate Limiting application.

Clone the repository

 git clone https://github.com/redis-developer/basic-rate-limiting-demo-python

Run the commands below from inside the cloned directory to get a functioning Git repository that contains a simple application as well as a package.json file.

$ heroku create
Creating app... done, ⬢ fast-reef-76278
https://fast-reef-76278.herokuapp.com/ | https://git.heroku.com/fast-reef-76278.git

Step 6. Setting up Environment Variables

Go to the Heroku dashboard, click "Settings" and set REDIS_ENDPOINT_URI and REDIS_PASSWORD under the Config Vars. Refer to Step 1 for the correct values to use.

heroku

Step 7. Deploy your code

Heroku generates a random name (in this case fast-reef-76278) for your app, or you can pass a parameter to specify your own app name. Now deploy your code:

$ git push heroku
Enumerating objects: 512, done.
Counting objects: 100% (512/512), done.
Delta compression using up to 12 threads
Compressing objects: 100% (256/256), done.
Writing objects: 100% (512/512), 1.52 MiB | 660.00 KiB/s, done.
Total 512 (delta 244), reused 512 (delta 244)
remote: Compressing source files... done.
remote: Building source:
remote:
remote: -----> Building on the Heroku-20 stack
remote: -----> Determining which buildpack to use for this app
remote: -----> Python app detected


remote: -----> Compressing...
remote: Done: 59.3M
remote: -----> Launching...
remote: Released v5
remote: https://fast-reef-76278.herokuapp.com/ deployed to Heroku
remote:
remote: Verifying deploy... done.
To https://git.heroku.com/fast-reef-76278.git
* [new branch] master -> master

Step 8. Accessing the application

Open https://fast-reef-76278.herokuapp.com/ to see your application

heroku

Next Steps

- + \ No newline at end of file diff --git a/create/heroku/herokuruby/index.html b/create/heroku/herokuruby/index.html index 9be6496452..cac9fa33a7 100644 --- a/create/heroku/herokuruby/index.html +++ b/create/heroku/herokuruby/index.html @@ -4,7 +4,7 @@ Deploy a Ruby app on Heroku using Redis | The Home of Redis Developers - + @@ -13,7 +13,7 @@

Deploy a Ruby app on Heroku using Redis


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

Heroku is a popular PaaS offering that allows software developers to easily deploy their code without worrying about the underlying infrastructure. By using a simple 'git push heroku' command, developers are able to deploy their application flawlessly. This platform offers support for a wide range of programming languages such as Java, Ruby, PHP, Node.js, Python, Scala, and Clojure.

Here's a quickstart guide to deploy Ruby apps on Heroku using Redis. We will be deploying a sample Leaderboard app and will be using company valuation and stock tickers as its domain.

Step 1. Create a Redis Enterprise Cloud Database

Create your free Redis Enterprise Cloud account by visiting this link.

tip

For a limited time, use TIGER200 to get $200 credits on Redis Enterprise Cloud and try all the advanced capabilities!

🎉 Click here to sign up

recloud

Follow this link to create a Redis Enterprise Cloud subscription and database. Once you create the database, you will be provisioned with a unique database endpoint URL, port and password. Save these for future reference.

Step 2. Create a Heroku account

If you are using Heroku for the first time, create your new Heroku account through this link.

Step 3. Install the Heroku CLI on your system

 brew install heroku

Step 4. Login to Heroku

 heroku login
heroku: Press any key to open up the browser to login or q to exit:
Opening browser to https://cli-auth.heroku.com/auth/cli/browser/XXXXXXXXXXA
Logging in... done
Logged in as your_email_address

Step 5. Connect your application to Redis Enterprise Cloud

For this demonstration, we will be using a Sample Redis Leaderboard app.

Clone the repository

 git clone https://github.com/redis-developer/basic-redis-leaderboard-demo-ruby

Run the commands below from inside the cloned directory to get a functioning Git repository that contains a simple application as well as an app.json file.

heroku create
Creating app... done, ⬢ thawing-shore-07338
https://thawing-shore-07338.herokuapp.com/ | https://git.heroku.com/thawing-shore-07338.git

Step 6. Setting up Environment Variables

Go to the Heroku dashboard, click "Settings" and set REDIS_HOST and REDIS_PASSWORD under the Config Vars. Refer to Step 1 for the correct values to use.

heroku

You now have a functioning Git repository that contains a simple application as well as an app.json file, Heroku's application manifest.

Step 7. Deploy your code

$ git push heroku

Wait for few seconds and you will see the messages below:

remote: -----> Discovering process types
remote: Procfile declares types -> (none)
remote: Default types for buildpack -> console, rake, web
remote:
remote: -----> Compressing...
remote: Done: 125.9M
remote: -----> Launching...
remote: Released v10
remote: https://thawing-shore-07338.herokuapp.com/ deployed to Heroku
remote:
remote: Verifying deploy... done.
To https://git.heroku.com/thawing-shore-07338.git
* [new branch] master -> master

Step 8. Accessing the application

Open https://thawing-shore-07338.herokuapp.com/ to access your application on the browser.

note

The Web URL is unique, hence it will be different in your case.

heroku

How does it work?

How the data is stored:

  • AAPL's details - a market cap of $2.6 trillion and USA origin - are stored in a Redis hash like this:

     HSET "company:AAPL" symbol "AAPL" market_cap "2600000000000" country USA
  • The market capitalization for each company is also stored in a ZSET (Redis Sorted Set).

     ZADD  companyLeaderboard 2600000000000 company:AAPL

How the data is accessed:

  • Top 10 companies:

     ZREVRANGE companyLeaderboard 0 9 WITHSCORES
  • All companies:

     ZREVRANGE companyLeaderboard 0 -1 WITHSCORES
  • Bottom 10 companies:

     ZRANGE companyLeaderboard 0 9 WITHSCORES
  • Between rank 10 and 15:

     ZREVRANGE companyLeaderboard 9 14 WITHSCORES
  • Show the rank of AAPL, FB and TSLA:

     ZREVRANK companyLeaderboard company:AAPL
     ZREVRANK companyLeaderboard company:FB
     ZREVRANK companyLeaderboard company:TSLA
  • Add 1 billion to the market cap of the FB company:

     ZINCRBY companyLeaderboard 1000000000 "company:FB"
  • Reduce the market cap of the FB company by 1 billion:

     ZINCRBY companyLeaderboard -1000000000 "company:FB"
  • How many companies have a market cap between 500 billion and 1 trillion?

     ZCOUNT companyLeaderboard 500000000000 1000000000000
  • How many companies have a market cap over a trillion?

     ZCOUNT companyLeaderboard 1000000000000 +inf

Next Steps

- + \ No newline at end of file diff --git a/create/heroku/index.html b/create/heroku/index.html index a0a0cdab11..ad83e9fc66 100644 --- a/create/heroku/index.html +++ b/create/heroku/index.html @@ -4,7 +4,7 @@ Overview | The Home of Redis Developers - + @@ -12,7 +12,7 @@

Overview

The following links provide you with the available options to run apps on Heroku using Redis:

Create a Redis database on Heroku
How to deploy a NodeJS based application on Heroku using Redis
How to deploy a Python app on Heroku using Redis
How to deploy a Java app on Heroku using Redis
How to deploy a Redis Rate Limiting app on Heroku using Go
How to deploy a Ruby app on Heroku using Redis
How to deploy a Go app on Heroku using Redis
- + \ No newline at end of file diff --git a/create/heroku/portal/index.html b/create/heroku/portal/index.html index 541bc602cd..5e61dc238d 100644 --- a/create/heroku/portal/index.html +++ b/create/heroku/portal/index.html @@ -4,7 +4,7 @@ Create a Redis database on Heroku | The Home of Redis Developers - + @@ -14,7 +14,7 @@

Create a Redis database on Heroku


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

Heroku is a cloud Platform as a Service (PaaS) supporting multiple programming languages that is used as a web application deployment model. Heroku lets developers build, run and scale applications in a similar manner across all of its supported languages (Java, Node.js, Scala, Clojure, Python, PHP, Ruby and Go).

Using Redis Enterprise Cloud directly

Redis is an open source, in-memory, key-value data store most commonly used as a primary database, cache, message broker, and queue. Redis cache delivers sub-millisecond response times, enabling fast and powerful real-time applications in industries such as gaming, fintech, ad-tech, social media, healthcare, and IoT.

Redis Cloud is a fully-managed cloud service for hosting and running your Redis dataset in a highly-available and scalable manner, with predictable and stable top performance. Redis Enterprise Cloud allows you to run a Redis server in the cloud and access the instance in multiple ways, such as RedisInsight, the Redis command line, and client tools. You can quickly and easily get your apps up and running with Redis Cloud through its Redis Heroku add-ons: just tell us how much memory you need and get started instantly with your first Redis database. You can then add more Redis databases (each running in a dedicated process, in a non-blocking manner) and increase or decrease the memory size of your plan without affecting your existing data.

tip

Heroku add-ons are a set of tools and services for developing, extending, and operating your app.

You can quickly and easily get your apps up and running with Redis Enterprise Cloud directly. Follow the below steps:

Step 1. Create Redis Enterprise Cloud

Create your free Redis Enterprise Cloud account by visiting this link

tip

For a limited time, use TIGER200 to get $200 credits on Redis Enterprise Cloud and try all the advanced capabilities!

🎉 Click here to sign up

recloud

Follow this link to create a Redis Enterprise Cloud subscription and database. Once you create the database, you will be provisioned with a unique database endpoint URL, port and password. Save these for future reference.

Before you proceed with Heroku, ensure that you can connect to the Redis instance and verify that it is accessible via the redis-cli command. You can run the INFO command from the Redis client to see the version, memory usage, stats, and modules enabled in the Redis Cloud database.
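For example (substitute the endpoint, port, and password from your subscription):

redis-cli -h <endpoint> -p <port> -a <password> INFO server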

Step 2. Create a Heroku account

If you are using Heroku for the first time, create your new Heroku account through this link.

heroku

Step 3. Install Heroku CLI on your system

 brew install heroku

Step 4. Login to Heroku

 heroku login
heroku: Press any key to open up the browser to login or q to exit:
Opening browser to https://cli-auth.heroku.com/auth/cli/browser/XXXXXXXXXXA
Logging in... done
Logged in as your_email_address

Step 5. Connect your application to Redis Enterprise Cloud

For this demonstration, we will be using a Sample Rate Limiting application.

Clone the repository

 git clone https://github.com/redis-developer/basic-rate-limiting-demo-python

Run the commands below to get a functioning Git repository that contains a simple application as well as a package.json file.

 heroku create
Creating app... done, ⬢ lit-bayou-75670
https://lit-bayou-75670.herokuapp.com/ | https://git.heroku.com/lit-bayou-75670.git

heroku

Step 6. Setting up environment variables

Go to the Heroku dashboard, click "Settings", and set REDIS_URL and REDIS_PASSWORD under Config Vars, using the database endpoint and password from the Redis Enterprise Cloud subscription you created in Step 1, as shown below.

note

The Redis URL endpoint is unique and might be different in your case. Please enter the values accordingly

Refer to Step 1 for the correct values to use.
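On the application side, these Config Vars are read from the environment at runtime. A minimal Python sketch of that pattern (it assumes redis-py and a redis:// URL stored in REDIS_URL, which may differ from how your app is configured):

import os
from urllib.parse import urlparse

import redis

# REDIS_URL and REDIS_PASSWORD are the Config Vars set above; parsing a
# redis:// URL this way is an assumption about the app's configuration.
url = urlparse(os.environ['REDIS_URL'])
r = redis.Redis(
    host=url.hostname,
    port=url.port or 6379,
    password=os.environ['REDIS_PASSWORD'],
)
r.ping()  # raises an exception if the database is unreachable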

heroku

Step 7. Pushing the code to Git

 git push heroku
remote: -----> Build succeeded!
remote: -----> Discovering process types
remote: Procfile declares types -> web
remote:
remote: -----> Compressing...
remote: Done: 32.9M
remote: -----> Launching...
remote: Released v5
remote: https://lit-bayou-75670.herokuapp.com/ deployed to Heroku
remote:
remote: Verifying deploy... done.
To https://git.heroku.com/lit-bayou-75670.git
* [new branch] main -> main

Check the logs:

 heroku logs --tail
2021-03-27T03:48:30.000000+00:00 app[api]: Build succeeded
2021-03-27T03:48:33.956884+00:00 heroku[web.1]: Starting process with command `node server/index.js`
2021-03-27T03:48:36.196827+00:00 app[web.1]: App listening on port 11893

Step 8. Accessing the app

heroku

Next Steps

Redis Launchpad
- + \ No newline at end of file diff --git a/create/heroku/ratelimiting-go/index.html b/create/heroku/ratelimiting-go/index.html index abaceee59d..d634c5727d 100644 --- a/create/heroku/ratelimiting-go/index.html +++ b/create/heroku/ratelimiting-go/index.html @@ -4,7 +4,7 @@ Deploy a Redis Rate Limiting app on Heroku | The Home of Redis Developers - + @@ -15,7 +15,7 @@ CookieName: user-limiter CookieValue: md5(<current time>) <current time> - request time in a format: 2006-01-02 15:04:05.999999999 -0700 MST

Redis Commands

  • Read the request count for a user via the user-limiter cookie: GET requests.<USER_IDENTIFIER> - the USER_IDENTIFIER comes from the request cookie
    • e.g. GET requests.0cbc6611f5540bd0809a388dc95a615b
  • Initialize the request counter with a 10-second expiry if it does not already exist: SETEX requests.<USER_IDENTIFIER> 10 0
    • e.g. SETEX requests.0cbc6611f5540bd0809a388dc95a615b 10 0
  • Increment the counter on each user request: INCR requests.<USER_IDENTIFIER>
    • e.g. INCR requests.0cbc6611f5540bd0809a388dc95a615b
  • Get the number of requests for the user: GET requests.<USER_IDENTIFIER>
    • e.g. GET requests.0cbc6611f5540bd0809a388dc95a615b

Code for Rate Limiting

// AcceptedRequest returns the number of requests the user has made in the
// current window and whether this request is within the allowed limit.
func (c Controller) AcceptedRequest(user string, limit int) (int, bool) {
    key := c.key(user)

    // Initialize the counter with an expiry if this is the first request
    // in the current window.
    if _, err := c.r.Get(key); err == redis.Nil {
        err := c.r.Set(key, "0", time.Second*time.Duration(limit))
        if err != nil {
            log.Println(err)
            return 0, false
        }
    }

    // Count this request.
    if err := c.r.Inc(key); err != nil {
        log.Println(err)
        return 0, false
    }

    requests, err := c.r.Get(key)
    if err != nil {
        log.Println(err)
        return 0, false
    }
    requestsNum, err := strconv.Atoi(requests)
    if err != nil {
        log.Println(err)
        return 0, false
    }

    // Reject the request once the counter exceeds the limit.
    if requestsNum > limit {
        return requestsNum, false
    }

    return requestsNum, true
}

Where c corresponds to the active controller and c.r is a Redis client.

Response

Status codes

  • 200 - OK - responded PONG
  • 406 - Not Acceptable - could not read cookie from request, returned when cookies are not allowed on the client side
  • 429 - Too Many Requests - the user sent more than 10 requests within the 10-second window

Headers

  • X-RateLimit-Limit: 10 - the allowed number of requests per 10-second window
  • X-RateLimit-Remaining: 9 - the number of requests remaining in the current 10-second window
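For comparison, here is a minimal Python sketch of the same counting logic together with the status code and headers described above, assuming redis-py; the function name and connection settings are illustrative, not part of the demo app. Note it collapses the Go code's GET/SETEX pair into a single atomic SET with NX and EX:

import redis

LIMIT = 10           # allowed requests per window
WINDOW_SECONDS = 10  # window length

r = redis.Redis()    # assumes a local Redis; adjust host/port as needed

def accepted_request(user: str):
    """Mirrors the Go AcceptedRequest logic and returns the HTTP status
    code plus the X-RateLimit-* headers for this request."""
    key = f'requests.{user}'
    # Start a fresh window if the counter does not exist yet.
    r.set(key, 0, ex=WINDOW_SECONDS, nx=True)
    count = r.incr(key)
    status = 429 if count > LIMIT else 200
    headers = {
        'X-RateLimit-Limit': str(LIMIT),
        'X-RateLimit-Remaining': str(max(LIMIT - count, 0)),
    }
    return status, headers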

Available commands

References

- + \ No newline at end of file diff --git a/create/homebrew/index.html b/create/homebrew/index.html index b703097a20..9aed0dd1b3 100644 --- a/create/homebrew/index.html +++ b/create/homebrew/index.html @@ -4,7 +4,7 @@ Brew Install Redis | Redis Developer Hub - + @@ -14,7 +14,7 @@

Create a Redis database on Mac OS


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

To install Redis Stack on macOS, use Homebrew. Make sure that you have Homebrew installed before starting on the installation instructions below.

Follow the instructions below to set up Redis Stack on macOS:

Step 1. Install Redis Stack using Homebrew

First, tap the Redis Stack Homebrew tap and then run brew install as shown below:

 brew tap redis-stack/redis-stack
brew install --cask redis-stack

This will install all Redis and Redis Stack binaries. How you run these binaries depends on whether you already have Redis installed on your system.

 ==> Installing Cask redis-stack-redisinsight
==> Moving App 'RedisInsight-preview.app' to '/Applications/RedisInsight-preview.app'
🍺 redis-stack-redisinsight was successfully installed!
==> Installing Cask redis-stack
🍺 redis-stack was successfully installed!
INFO

If this is the first time you’ve installed Redis on your system, then all Redis Stack binaries will be installed and accessible from the $PATH. On M1 Macs, this assumes that /opt/homebrew/bin is in your path. On Intel-based Macs, /usr/local/bin should be in your path.

To check this, run:

 echo $PATH

Then, confirm that the output contains /opt/homebrew/bin (M1 Mac) or /usr/local/bin (Intel Mac). If these directories are not in the output, see the “Existing Redis installation” instructions below.

Start Redis Stack Server

You can now start Redis Stack Server as follows:

 redis-stack-server

Existing Redis installation

If you have an existing Redis installation on your system, then you’ll need to modify your PATH environment variable to ensure that you’re using the latest Redis Stack binaries.

Open the file ~/.bashrc or ~/.zshrc (depending on your shell), and add the following line.

  export PATH=/usr/local/Caskroom/redis-stack-server/<VERSION>/bin:$PATH

Go to Applications and click "RedisInsight Preview" to bring up the Redis Desktop GUI tool.

Step 2. Add Redis database

access redisinsight

Step 3. Enter Redis database details

Add the local Redis database endpoint and port.

access redisinsight

Step 5. Redis for time series

Redis Stack provides you with a native time series data structure. Let's see how a time series might be useful in our bike shop.

As we have multiple physical shops too, alongside our online shop, it could be helpful to have an overview of the sales volume. We will create one time series per shop tracking the total amount of all sales. In addition, we will mark the time series with the appropriate region label, east or west. This kind of representation will allow us to easily query bike sales performance per certain time periods, per shop, per region or across all shops.

Click "Guides" icon (just below the key) in the left sidebar and choose "Redis for the time series" for this demonstration.

redis for timeseries

Step 6. Create time series per shop

 TS.CREATE bike_sales_1 DUPLICATE_POLICY SUM LABELS region east compacted no
TS.CREATE bike_sales_2 DUPLICATE_POLICY SUM LABELS region east compacted no
TS.CREATE bike_sales_3 DUPLICATE_POLICY SUM LABELS region west compacted no
TS.CREATE bike_sales_4 DUPLICATE_POLICY SUM LABELS region west compacted no
TS.CREATE bike_sales_5 DUPLICATE_POLICY SUM LABELS region west compacted no

As shown in the commands above, we make the shop id (1, 2, 3, 4, 5) part of the time series name. You might also notice the DUPLICATE_POLICY SUM argument; this describes what should be done when two events in the same time series share the same timestamp. In this case, it would mean that two sales happened at exactly the same time, so the resulting value should be the sum of the two sale amounts.
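If you would rather drive this from application code, the equivalent calls with redis-py's TimeSeries API look roughly like this. This is a sketch: it assumes redis-py 4.x and a local Redis Stack instance, and creating a series twice will raise an error:

import redis

r = redis.Redis()  # assumes a local Redis Stack instance and redis-py 4.x

# Equivalent of TS.CREATE ... DUPLICATE_POLICY SUM LABELS region <region> compacted no
for shop_id, region in [(1, 'east'), (2, 'east'), (3, 'west'), (4, 'west'), (5, 'west')]:
    r.ts().create(
        f'bike_sales_{shop_id}',
        duplicate_policy='sum',
        labels={'region': region, 'compacted': 'no'},
    )

# Record a sale of 23 units now ('*' = server timestamp).
r.ts().add('bike_sales_1', '*', 23)

# Total hourly sales across all east-region shops.
print(r.ts().mrange(0, '+', filters=['region=east'],
                    aggregation_type='sum', bucket_size_msec=3600000))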

Since the metrics are collected with a millisecond timestamp, we can compact our time series into sales per hour:

create time series per shop

Step 7. Running the query

execute the query

Step 8. Time series compaction

Redis Time Series supports downsampling with the following aggregations: avg, sum, min, max, range, count, first and last. If you want to keep all of your raw data points indefinitely, your data set grows linearly over time. However, if your use case allows you to have less fine-grained data further back in time, downsampling can be applied. This allows you to keep fewer historical data points by aggregating raw data for a given time window using a given aggregation function.

Example:

 TS.CREATERULE bike_sales_5 bike_sales_5_per_day AGGREGATION sum 86400000

time series compaction
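The same compaction rule can also be created from application code. A sketch using redis-py's TimeSeries API (it assumes redis-py 4.x and a Redis Stack server, and the destination series must exist before the rule is created):

import redis

r = redis.Redis()  # assumes a local Redis Stack instance and redis-py 4.x

# The destination series for the daily aggregate must exist first.
r.ts().create('bike_sales_5_per_day')

# Equivalent of: TS.CREATERULE bike_sales_5 bike_sales_5_per_day AGGREGATION sum 86400000
r.ts().createrule('bike_sales_5', 'bike_sales_5_per_day',
                  aggregation_type='sum', bucket_size_msec=86400000)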

Redis Launchpad
- + \ No newline at end of file diff --git a/create/images/index.html b/create/images/index.html index 965a76cef6..7b03045574 100644 --- a/create/images/index.html +++ b/create/images/index.html @@ -4,7 +4,7 @@ images | The Home of Redis Developers - + @@ -12,7 +12,7 @@
- + \ No newline at end of file diff --git a/create/index.html b/create/index.html index 2a2154e3dd..069bfc3235 100644 --- a/create/index.html +++ b/create/index.html @@ -4,7 +4,7 @@ Create a Redis Database - Quick Starts | The Home of Redis Developers - + @@ -12,7 +12,7 @@

Create a Redis Database - Quick Starts

The following quick starts shows various ways of how to get started and create a new Redis database:

Create database using Redis Stack
Getting started with Redis Functions
Create database using Redis Enterprise Cloud
Create Redis database on Heroku
Create Redis database on Azure Cache
Create Redis database using Google Cloud
Create Redis database on AWS
Create Redis database on Docker
Create Redis database on Kubernetes Platform
Create Redis database from Source
Create Redis database on Mac using Homebrew
- + \ No newline at end of file diff --git a/create/jenkins/index.html b/create/jenkins/index.html index 50bb87d2f4..35f5983882 100644 --- a/create/jenkins/index.html +++ b/create/jenkins/index.html @@ -4,7 +4,7 @@ How to Deploy a Redis Enterprise Database from a Jenkins Pipeline | The Home of Redis Developers - + @@ -12,7 +12,7 @@

How to Deploy a Redis Enterprise Database from a Jenkins Pipeline


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis
Profile picture for Matthew Royal
Author:
Matthew Royal, Consulting Engineer at Redis

Jenkins is currently the most popular CI (Continuous Integration) tool, with ~15M users. It is an open source automation server which enables developers to reliably build, test, and deploy their software. It was forked in 2011 from a project called Hudson after a dispute with Oracle, and is used for Continuous Integration and Continuous Delivery (CI/CD) and test automation. Jenkins is based on Java and provides over 1700 plugins to automate your developer workflow and save you a lot of time on repetitive tasks.

image

Source: Datanyze market analysis

Jenkins Pipeline performs Continuous Delivery tasks declared in a Jenkinsfile stored alongside code. The Pipeline plugin has a fairly comprehensive tutorial checked into its source tree. Plugins are the primary means of enhancing the functionality of a Jenkins environment to suit organization- or user-specific needs. Using a Pipeline, you can configure Jenkins to automatically deploy key pieces of infrastructure, such as a Redis database.

Architecture

Jenkins Pipelines are the Continuous Delivery (CD) side of Jenkins. They use a Jenkinsfile declarative script to define the behavior of the pipeline. You can script actions in Groovy and run shell scripts from it, so you can make it do pretty much anything.

The Jenkinsfile instructs Jenkins to export some environment variables from the Credentials store in order to connect to the Redis server, then executes the Python pipeline script with the Deployment Configuration file given as a parameter. An example deployment-configuration-file.json looks like:

{
  "database": {
    "name": "made-with-jenkins",
    "port": 12345,
    "size": "S",
    "operation": "CREATE"
  }
}

The Python script uses predefined JSON template files that create Redis databases of fixed t-shirt sizes (S, M, L, XL). The Deployment Config file tells the Python script what the desired database name, port, and size are. A sample template file looks like:

{
  "name": "{NAME}",
  "type": "redis",
  "memory_size": 343597383
}
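The tutorial's actual pipeline script lives in the repository, but a condensed sketch of its shape might look like the following. This is illustrative only: the template file names and size mapping are assumptions, and /v1/bdbs is the Redis Enterprise REST API endpoint for creating databases. It reads the four environment variables exported by the Jenkinsfile (described later):

import json
import os

import click
import requests

# Hypothetical mapping from t-shirt size to a template file name.
TEMPLATES = {'S': 'template-small.json', 'M': 'template-medium.json'}

@click.command()
@click.option('--deployfile', required=True)
def deploy(deployfile):
    with open(deployfile) as f:
        db = json.load(f)['database']
    # Render the size template with the requested database name and port.
    with open(TEMPLATES[db['size']]) as f:
        body = json.loads(f.read().replace('{NAME}', db['name']))
    body['port'] = db['port']

    # POST to the Redis Enterprise REST API to create the database.
    resp = requests.post(
        f"{os.environ['REDIS_SERVER_FQDN']}:{os.environ['REDIS_SERVER_PORT']}/v1/bdbs",
        json=body,
        auth=(os.environ['REDIS_USER'], os.environ['REDIS_PASS']),
        verify=False,  # self-signed cluster certs; tighten this in production
    )
    print(resp.status_code, resp.text)

if __name__ == '__main__':
    deploy()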

The following is an architectural diagram of how a Jenkins pipeline adds a database to a Redis cluster.

alt_text

Process

  1. The Jenkins pipeline clones a remote git repository, containing the application code and the pipeline code.
  2. The Redis host, port, user, and password are decrypted from the credentials store and are exported as Environment variables.
  3. Jenkins runs the Python pipeline script, specifying the deployment configuration file in the git repo.
  4. The Python script uses the deployment configuration file to choose and customize a pre-populated template to use as the body of the REST create database request to Redis.

List of Pipeline Code Files

Configuring Jenkins

Installing Jenkins

You can use Docker Desktop to quickly get a Jenkins instance up and running, exposing ports 8080 (web GUI) and 50000 (inbound agents).

docker run --name jenk -p 8080:8080 -p 50000:50000 jenkins/jenkins:lts-jdk11

The installation will generate a first-run password in the docker-cli output.

Then open the Jenkins URL http://localhost:8080/ and enter the password to unlock your instance and begin installation.

alt_text

Choose "Install suggested plugins" to perform the Jenkins configuration.

alt_text

Wait for the plugins to complete the installation process.

alt_text

Next, you’re prompted to create your admin user.

alt_text

Congratulations! Jenkins is ready!

alt_text

Installing Python and custom libraries

If you use an existing instance of Jenkins server, you can install Python and the custom libraries from the command line interface of that machine.

Docker instances of Jenkins can be accessed by shell using the following command:

docker exec -it -u root jenk bash

The Python pipeline script requires Python 3 along with the click and requests libraries.

apt-get update
apt-get install -y python3-pip

pip install --upgrade pip
pip install click
pip install requests

Alternatively, if you are creating a new Jenkins from scratch, you can include these dependencies in a separate Dockerfile that builds off the base Jenkins image:

FROM jenkins:latest
USER root
RUN apt-get update
RUN apt-get install -y python3-pip

# Install app dependencies
RUN pip3 install --upgrade pip
RUN pip3 install click
RUN pip3 install requests

Add credentials to Secret Store

Using the left-side menu, select Manage Jenkins, then select Manage Credentials, then click the link (global).

alt_text

alt_text

alt_text

From here, you can specify Kind: Secret text for the 4 secrets required to connect with the Redis REST endpoint:

  • REDIS_SERVER_FQDN
    • Set to the 'https://server-address' of the target Redis instance.
  • REDIS_SERVER_PORT
    • Set to the Redis REST API port (default 9443).
  • REDIS_USER
    • Set to the Redis admin user allowed to create databases.
  • REDIS_PASS
    • Set to the Redis admin user's password.

alt_text

If you are using a private code repository, you may also wish to include a Personal Access Token here.

Create the Jenkins pipeline

From the dashboard, click New Item.

alt_text

Enter in a name for the pipeline, and choose the Pipeline type.

alt_text

Connect GitHub repository

From the Pipeline configuration page that appears, check the GitHub box and enter the git clone URL, complete with any credentials needed to read the repository. For GitHub access, the password should be a Personal Access Token rather than the actual user password.

alt_text

Redis pipeline Jenkinsfile

Scrolling down on this page to the Advanced Project Options, you can either paste in the Jenkinsfile, or you can specify the filename if the file exists in the git repository.

alt_text

Here is an example Jenkinsfile containing the mapping of Credentials to the environment variables, and 2 separate stages – a Hello World which always succeeds, and a build stage that invokes the Python script. Paste this into the pipeline script section.

pipeline {
    agent any

    environment {
        REDIS_SERVER_FQDN = credentials('REDIS_SERVER_FQDN')
        REDIS_SERVER_PORT = credentials('REDIS_SERVER_PORT')
        REDIS_USER = credentials('REDIS_USER')
        REDIS_PASS = credentials('REDIS_PASS')
    }

    stages {
        stage('Hello') {
            steps {
                echo 'Hello World'
            }
        }

        stage('build') {
            steps {
                git branch: 'main', url: 'https://github.com/masyukun/redis-jenkins-pipeline.git'
                sh 'python3 jenkins-re-pipeline.py --deployfile deployment-configuration-file.json'
            }
        }
    }
}

Click "Save" when the job spec is complete.

Run the Jenkins pipeline

Click on the pipeline you created:

alt_text

Click the "Build Now" icon on the left side menu.

alt_text

Click the Status icon on the left side menu in order to see the results of all the output from each of the stages of your pipeline.

alt_text

Hover over the build stage and click the Logs button of the most recent build in order to see the Python script’s output.

alt_text

Sample output: you should see a verbose response from Redis’s REST service in the “Shell Script” accordion pane.

There’s also a “Git” output log, in case you need to debug something at that level. Any time you update the branch in the remote git repository, you should see evidence in that log that the latest changes have successfully checked out into the local Jenkins git repository.

alt_text

Open your Redis Enterprise Secure Management UI at https://servername:8443 and click on the databases menu item to verify that your database was created with the name, port, and size specified in the deployment-configuration-file.json file.

alt_text

Congratulations! You have deployed a Redis Enterprise database using a Jenkins Pipeline!

The GitHub repository is currently: https://github.com/masyukun/redis-jenkins-pipeline

Further Reading

- + \ No newline at end of file diff --git a/create/kubernetes/index.html b/create/kubernetes/index.html index a2e32970fd..9acf060f10 100644 --- a/create/kubernetes/index.html +++ b/create/kubernetes/index.html @@ -4,7 +4,7 @@ Overview | The Home of Redis Developers - + @@ -12,7 +12,7 @@ - + \ No newline at end of file diff --git a/create/kubernetes/kubernetes-gke/index.html b/create/kubernetes/kubernetes-gke/index.html index 74096095b4..3f7d49edf9 100644 --- a/create/kubernetes/kubernetes-gke/index.html +++ b/create/kubernetes/kubernetes-gke/index.html @@ -4,7 +4,7 @@ Create a Redis database on Google Kubernetes Engine | The Home of Redis Developers - + @@ -12,7 +12,7 @@

Create a Redis database on Google Kubernetes Engine


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

Step 1. Pre-requisites

Step 2. Ensure that gcloud is installed on your local Linux system:

$ gcloud -v
Google Cloud SDK 320.0.0
alpha 2020.12.04
app-engine-go 1.9.71
app-engine-java 1.9.84
app-engine-python 1.9.91
app-engine-python-extras 1.9.91

Step 3. Create a 5 Node GKE cluster:

$ gcloud container clusters create testredis  --subnetwork default --num-nodes 5 --machine-type e2-standard-8 --enable-basic-auth --region us-east1

Step 4. Create a new namespace

[node1 kubelabs]$ kubectl create namespace demo
namespace/demo created

Step 5. Switch context to the newly created namespace

$ kubectl config set-context --current --namespace=demo
Context "kubernetes-admin@kubernetes" modified.

Step 6. Deploy the operator bundle

To deploy the default installation with kubectl, run the following command, which deploys a bundle of all the YAML declarations required for the operator. You can download the bundle YAML file via this link:

$ kubectl apply -f bundle.yaml
role.rbac.authorization.k8s.io/redis-enterprise-operator created
rolebinding.rbac.authorization.k8s.io/redis-enterprise-operator created
serviceaccount/redis-enterprise-operator created
customresourcedefinition.apiextensions.k8s.io/redisenterpriseclusters.app.redislabs.com created
deployment.apps/redis-enterprise-operator created
customresourcedefinition.apiextensions.k8s.io/redisenterprisedatabases.app.redislabs.com created

Step 7. Verifying the Deployment:

Run the command below to verify that the redis-enterprise-operator deployment is running:

kubectl get deployment
NAME READY UP-TO-DATE AVAILABLE AGE
redis-enterprise-operator 1/1 1 1 9m34s

Step 8. Create a Redis Enterprise Cluster

Create a Redis Enterprise Cluster (REC) using the default configuration, which is suitable for development type deployments and works in typical scenarios:

$ kubectl apply -f crds/app_v1_redisenterprisecluster_cr.yaml

redisenterprisecluster.app.redislabs.com/redis-enterprise created

Step 9. Verifying the Redis Enterprise Cluster

rec is a shortcut for RedisEnterpriseCluster. The cluster takes around 5-10 minutes to come up. Run the command below to check that the RedisEnterpriseCluster is up:

$ kubectl get rec
NAME AGE
redis-enterprise 14s
[node1 redis-enterprise-k8s-docs]$

Step 10. Listing Kubernetes Resources

$ kubectl get po,svc,deploy
NAME READY STATUS RESTARTS AGE
pod/redis-enterprise-0 2/2 Running 0 6m42s
pod/redis-enterprise-1 2/2 Running 0 4m34s
pod/redis-enterprise-2 2/2 Running 0 2m18s
pod/redis-enterprise-operator-58f8566fd7-5kcvz 1/1 Running 0 69m
pod/redis-enterprise-services-rigger-5849b86c65-lwql9 1/1 Running 0 6m42s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/kubernetes ClusterIP 10.3.240.1 <none> 443/TCP 71m
service/redis-enterprise ClusterIP None <none> 9443/TCP,8001/TCP,8070/TCP 6m42s
service/redis-enterprise-ui LoadBalancer 10.3.246.252 35.196.117.24 8443:31473/TCP 6m42s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/redis-enterprise-operator 1/1 1 1 69m
deployment.apps/redis-enterprise-services-rigger 1/1 1 1 6m44s

You can verify the Pods and list of services using the Google Cloud Dashboard UI:

Redis Enterprise UI

Step 11. Listing the Secrets

kubectl get secrets redis-enterprise -o yaml | grep password | awk '{print $2}'
bXVLeHRpblY=

Step 12. Decoding the Password

echo bXVLeHRpblY= | base64 -d
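If you are scripting cluster access, the same decoding is a couple of lines in Python:

import base64

encoded = 'bXVLeHRpblY='  # the value read from the redis-enterprise secret
print(base64.b64decode(encoded).decode())  # prints the cluster admin password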

Step 13. Creating a Database

Open https://localhost:8443 in the browser to see the Redis Enterprise Software web console. Click on "Setup", add your preferred DNS and admin credentials and proceed further to create your first Redis database using Redis Enterprise.

Next Steps

Redis Launchpad
- + \ No newline at end of file diff --git a/create/kubernetes/kubernetes-operator/index.html b/create/kubernetes/kubernetes-operator/index.html index 50d56478fa..85e8bcc774 100644 --- a/create/kubernetes/kubernetes-operator/index.html +++ b/create/kubernetes/kubernetes-operator/index.html @@ -4,7 +4,7 @@ Kubernetes Operator: What It Is and Why You Should Really Care About It | The Home of Redis Developers - + @@ -13,7 +13,7 @@

Kubernetes Operator: What It Is and Why You Should Really Care About It


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

My Image

Kubernetes is popular due to its capability to deploy new apps at a faster pace. Thanks to "Infrastructure as data" (specifically, YAML), today you can express all your Kubernetes resources such as Pods, Deployments, Services, Volumes, etc., in a YAML file. These default objects make it much easier for DevOps and SRE engineers to fully express their workloads without the need to learn how to write code in a programming language like Python, Java, or Ruby.

Kubernetes is designed for automation. Out of the box, you get lots of built-in automation from the core of Kubernetes. It can speed up your development process by making easy, automated deployments, updates (rolling update), and by managing your apps and services with almost zero downtime. However, Kubernetes can’t automate the process natively for stateful applications. For example, say you have a stateful workload, such as a database application, running on several nodes. If a majority of nodes go down, you’ll need to reload the database from a specific snapshot following specific steps. Using existing default objects, types, and controllers in Kubernetes, this would be impossible to achieve.

Think of scaling nodes up, or upgrading to a new version, or disaster recovery for your stateful application — these kinds of operations often need very specific steps, and typically require manual intervention. Kubernetes cannot know all about every stateful, complex, clustered application. Kubernetes, on its own, does not know the configuration values for, say, a Redis cluster, with its arranged memberships and stateful, persistent storage. Additionally, scaling stateful applications in Kubernetes is not an easy task and requires manual intervention.

Stateful vs Stateless Applications

Let’s try to understand the difference between stateful versus stateless applications with a simple example. Consider a Kubernetes cluster running a simple web application (without any operator). The YAML file below allows you to create two replicas of NGINX (a stateless application).

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  namespace: web
spec:
  selector:
    matchLabels:
      app: nginx
  replicas: 2
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:1.14.2
          ports:
            - containerPort: 80

In the example above, a Deployment object named nginx-deployment (indicated by the .metadata.name field) is created in the namespace “web.” It creates two replicated Pods, as indicated by the .spec.replicas field. The .spec.selector field defines how the Deployment finds which Pods to manage; in this case, it selects a label that is defined in the Pod template (app: nginx). Within the template field, the Pods are labeled app: nginx using the .metadata.labels field, and the Pod template's specification declares a single container named nginx, running the nginx Docker Hub image at version 1.14.2 on port 80.

Run the command below to create the Deployment resource:

kubectl create -f nginx-dep.yaml
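Alternatively, the same step can be driven from code with the official Kubernetes Python client. This is a sketch: it assumes the kubernetes library is installed, a valid kubeconfig, and that the "web" namespace referenced in the manifest already exists:

from kubernetes import client, config, utils

# Load the local kubeconfig and apply the manifest shown above.
config.load_kube_config()
k8s_client = client.ApiClient()
utils.create_from_yaml(k8s_client, 'nginx-dep.yaml')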

Let us verify if the Deployment was created successfully by running the following command:

 kubectl get deployments
NAME READY UP-TO-DATE AVAILABLE AGE
nginx-deployment 2/2 2 2 63s

The example above shows the name of the Deployment in the namespace. It also displays how many replicas of the application are available to your users. You can also see that the number of desired replicas that have been updated to achieve the desired state is 2.

alt_text

You can run the kubectl describe command to get detailed information of deployment resources. To show details of a specific resource or group of resources:

 kubectl describe deploy
Name: nginx-deployment
Namespace: default
CreationTimestamp: Mon, 30 Dec 2019 07:10:33 +0000
Labels: <none>
Annotations: deployment.kubernetes.io/revision: 1
Selector: app=nginx
Replicas: 2 desired | 2 updated | 2 total | 0 available | 2 unavailable
StrategyType: RollingUpdate
MinReadySeconds: 0
RollingUpdateStrategy: 25% max unavailable, 25% max surge
Pod Template:
Labels: app=nginx
Containers:
nginx:
Image: nginx:1.7.9
Port: 80/TCP
Host Port: 0/TCP
Environment: <none>
Mounts: <none>
Volumes: <none>
Conditions:
Type Status Reason
---- ------ ------
Available False MinimumReplicasUnavailable
Progressing True ReplicaSetUpdated
OldReplicaSets: <none>
NewReplicaSet: nginx-deployment-6dd86d77d (2/2 replicas created)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal ScalingReplicaSet 90s deployment-controller Scaled up replica set nginx-deployment-6dd86d77d to 2

A Deployment is responsible for keeping a set of Pods running, but it’s equally important to expose an interface to these Pods so that the other external processes can access them. That’s where the Service resource comes in. The Service resource lets you expose an application running in Pods to be reachable from outside your cluster. Let us create a Service resource definition as shown below:

apiVersion: v1
kind: Service
metadata:
  name: nginx-service
spec:
  selector:
    app: nginx
  ports:
    - port: 80
      targetPort: 80
  type: LoadBalancer

The above YAML specification creates a new Service object named "nginx-service," which targets TCP port 80 on any Pod with the app=nginx label.

 kubectl get svc -n web
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
nginx-service LoadBalancer 10.107.174.108 localhost 80:31596/TCP 46s

alt_text

Let’s scale the Deployment to 4 replicas. We are going to use the kubectl scale command, followed by the deployment type, name, and desired number of instances. The output is similar to this:

kubectl scale deployments/nginx-deployment --replicas=4
deployment.extensions/nginx-deployment scaled

The change was applied, and we have 4 instances of the application available. Next, let’s check if the number of Pods changed. There should now be 4 Pods running in the cluster (as shown in the diagram below)

alt_text

 kubectl get deployments
NAME READY UP-TO-DATE AVAILABLE AGE
nginx-deployment 4/4 4 4 4m

There are 4 Pods, with different IP addresses. The change was registered in the Deployment events log.

 kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-deployment-6dd86d77d-b4v7k 1/1 Running 0 4m32s 10.1.0.237 docker-desktop none none
nginx-deployment-6dd86d77d-bnc5m 1/1 Running 0 4m32s 10.1.0.236 docker-desktop none none
nginx-deployment-6dd86d77d-bs6jr 1/1 Running 0 86s 10.1.0.239 docker-desktop none none
nginx-deployment-6dd86d77d-wbdzv 1/1 Running 0 86s 10.1.0.238 docker-desktop none none

Deleting one of the web server Pods triggers work in the control plane to restore the desired state of four replicas. Kubernetes starts a new Pod to replace the deleted one. In this excerpt, the replacement Pod shows a STATUS of ContainerCreating:

 kubectl delete pod nginx-deployment-6dd86d77d-b4v7k

You will notice that the Nginx static web server is interchangeable with any other replica, or with a new Pod that replaces one of the replicas. It doesn’t store data or maintain state in any way. Kubernetes doesn’t need to make any special arrangements to replace a failed Pod, or to scale the application by adding or removing replicas of the server. Now you might be thinking, what if you want to store the state of the application? Great question.

Scaling stateful application is hard

Scaling stateless applications in Kubernetes is easy, but that's not the case for stateful applications. Stateful applications require manual intervention: bringing Pods up and down is not that simple, because each node has an identity and data attached to it, and removing a Pod means losing its data and disrupting the system.

alt_text

Consider a Kubernetes cluster with 6 worker Nodes hosting an Nginx web application connected to a persistent volume, as shown above. Here is a snippet of the StatefulSet YAML file:


apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  serviceName: "nginx"
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:1.14.2
          ports:
            - containerPort: 80
              name: web
          volumeMounts:
            - name: www
              mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
    - metadata:
        name: www
      spec:
        accessModes: ["ReadWriteOnce"]
        resources:
          requests:
            storage: 1Gi

Kubernetes makes physical storage devices available to your cluster in the form of objects called Persistent Volumes. Each of these Persistent Volumes is consumed by a Kubernetes Pod by issuing a PersistentVolumeClaim object, also known as a PVC. A PVC object lets Pods use storage from Persistent Volumes.

Imagine a scenario in which we want to downscale a cluster from 5 Nodes to 3 Nodes. Suddenly removing 2 Nodes at once is a potentially destructive operation that might lead to the loss of all copies of the data. A better way to handle Node removal would be to first migrate data from the Node to be removed to other Nodes in the system before performing the actual Pod deletion. It is important to note that the StatefulSet controller is necessarily generic and cannot possibly know about every possible way to manage data migration and replication. In practice, StatefulSets are rarely enough to handle complex, distributed stateful workload systems in production environments.

Now the question is, how to solve this problem? Enter Operators. Operators were developed to handle the sophisticated, stateful applications that the default Kubernetes controllers aren’t able to handle. While Kubernetes controllers like StatefulSets are ideal for deploying, maintaining, and scaling simple stateless applications, they are not equipped to handle access to stateful resources, or to upgrade, resize, and backup of more elaborate clustered applications such as databases. A Kubernetes Operator fills in the gaps between the capabilities and automation provided by Kubernetes and how your software uses Kubernetes for automation of tasks relevant to your software.

An Operator is basically an application-specific controller that can help you manage a Kubernetes application. It is a way to package, run, and maintain a Kubernetes application. It is designed to extend the capabilities of Kubernetes, and also simplify application management. This is especially useful for stateful applications, which include persistent storage and other elements external to the application, and may require extra work to manage and maintain.

tip

The Operator Framework is an open source project that provides developer and runtime Kubernetes tools, enabling you to accelerate the development of an operator. Learn more about operator framework here

Functions of Kubernetes Operators

A Kubernetes Operator uses the Kubernetes API server to create, configure, and manage instances of complex stateful applications on behalf of a Kubernetes user. There is a public repository called OperatorHub.io that is designed to be the public registry for finding Kubernetes Operator backend services. With Operator Hub, developers can easily create an application based on an operator without going through the complexity of crafting an operator from scratch.

alt_text

Below are a few examples of popular Kubernetes Operators and their functions and capabilities.

Kubernetes Operators:

  • Helps you deploy an application on demand (for example, the Argo CD Operator (Helm), a declarative, GitOps continuous delivery tool for Kubernetes that helps with easy installation and configuration on demand)
  • Helps you install applications with the required configurations and number of application instances
  • Allows you to take and restore backups of the application state (for example, Velero operator manages disaster recovery, backup, and restoration of cluster components such as pv, pvc, deployments, etc., to aid in disaster recovery)
  • Handles the upgrades of the application code plus the changes, such as database schema (for example, Flux is a continuous delivery solution for Kubernetes that allows automating updates to configuration when there is new code to deploy)
  • Can manage a cluster of database servers (for example, the MariaDB operator creates a MariaDB server and database easily through a simple custom resource)
  • Can install a database cluster of a declared software version and number of members
  • Scale applications in or out
  • Continues to monitor an application as it runs (for example, Prometheus Operator simplifies the deployment and configuration of Prometheus, Alertmanager, and related monitoring components)
  • Initiate upgrades, automated backups, and failure recovery, simulating failure in all or part of your cluster to test its resilience
  • Allows you to publish a service to applications that don’t support Kubernetes APIs to discover them

How does an Operator work?

Operators work by extending the Kubernetes control plane and API server. Operators allow you to define a Custom Controller that watches your application and performs custom tasks based on its state. The application you want to watch is usually defined in Kubernetes as a new object: a Custom Resource (CR) that has its own YAML spec and object type that is well understood by the API server. That way, you can define any specific criteria in the custom spec to watch out for, and reconcile the instance when it doesn't match the spec. The way an Operator's controller reconciles against a spec is very similar to native Kubernetes controllers, though it uses mostly custom components.
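To make the watch-and-reconcile loop concrete, here is a toy sketch in Python using the Kopf operator framework. It is purely illustrative of the pattern (the Redis Enterprise Operator itself is not built this way), and the custom resource group, version, and plural are hypothetical:

import kopf

# Invoked when a new "cacheclusters" custom resource is created.
@kopf.on.create('example.com', 'v1', 'cacheclusters')
def on_create(spec, name, logger, **kwargs):
    nodes = spec.get('nodes', 3)
    logger.info(f"Creating cluster {name} with {nodes} nodes")

# Invoked whenever the resource's spec changes.
@kopf.on.update('example.com', 'v1', 'cacheclusters')
def on_update(spec, name, logger, **kwargs):
    # Compare the desired state (spec) with the observed state and take
    # whatever action closes the gap: the reconcile step.
    logger.info(f"Reconciling {name} towards {spec.get('nodes')} nodes")

Running kopf run on this file starts the operator process: Kopf watches the custom resources through the API server and invokes these handlers whenever the observed state changes.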

What is the Redis Enterprise Operator?

Redis has created an Operator that deploys and manages the lifecycle of a Redis Enterprise Cluster. The Redis Enterprise Operator is the fastest, most efficient way to deploy and maintain a Redis Enterprise cluster in Kubernetes. The Operator creates, configures, and manages Redis Enterprise deployments from a single Kubernetes control plane. This means that you can manage Redis Enterprise instances on Kubernetes just by creating native objects, such as a Deployment, ReplicaSet, StatefulSet, etc. Operators allow full control over the Redis Enterprise cluster lifecycle.

The Redis Enterprise Operator acts as a custom controller for the custom resource Redis Enterprise Cluster, or “REC”, which is defined through a Kubernetes CRD (custom resource definition) and deployed with a YAML file. The Redis Enterprise Operator functions as the logic “glue” between the Kubernetes infrastructure and the Redis Enterprise cluster.

How does the Redis Enterprise Operator work?

alt_text

The Redis Enterprise Operator supports two Custom Resource Definitions (CRDs):

  • Redis Enterprise Cluster (REC): An API to create Redis Enterprise clusters. Note that only one cluster is supported per Operator deployment.
  • Redis Enterprise Database (REDB): An API to create Redis databases running on the Redis Enterprise cluster. Note that the Redis Enterprise Operator is namespaced. High-level architecture and overview of the solution can be found HERE.

This is how it works:

  1. First, the Redis Enterprise cluster custom resource (“CR” for short) is read and validated by the operator for a cluster specification.
  2. Secondly, cluster StatefulSet, service rigger, cluster admin secrets, RS/UI services are created.
  3. A Redis Enterprise Database CR is read and validated by the operator.
  4. The database is created on the cluster and the database access credentials are stored in a Kubernetes secret object.
  5. The service rigger discovers the new database and configures the Kubernetes service for the database.
  6. An application workload uses the database secret and service for access to data (sketched in Python below).
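As a sketch of step 6, an application might read the generated secret and connect like this. The secret name, namespace, and service host below are illustrative placeholders; check your cluster for the names the operator actually generated:

import base64

import redis
from kubernetes import client, config

# Read the database credentials the operator stored in a secret.
config.load_kube_config()
secret = client.CoreV1Api().read_namespaced_secret(
    'redb-redis-enterprise-database', 'default')
password = base64.b64decode(secret.data['password']).decode()
port = int(base64.b64decode(secret.data['port']))

# The service rigger exposes the database behind a Kubernetes service.
r = redis.Redis(host='redis-enterprise-database.default.svc.cluster.local',
                port=port, password=password)
print(r.ping())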

Example of Operator automation

Consider the YAML file below:

apiVersion: app.redislabs.com/v1
kind: RedisEnterpriseCluster
metadata:
  name: rec
spec:
  # Add fields here
  nodes: 3

If you change the number of nodes to 5, the Operator talks to StatefulSets, and changes the number of replicas from 3 to 5. Once that happens, Kubernetes will take over and bootstrap new Nodes one at a time, deploying Pods accordingly. As each becomes ready, the new Nodes join the cluster and become available to Redis Enterprise master Nodes.

alt_text

apiVersion: app.redislabs.com/v1
kind: RedisEnterpriseDatabase
metadata:
  name: redis-enterprise-database
spec:
  redisEnterpriseCluster:
    name: redis-enterprise
  memorySize: 2GB

alt_text

To create a database, the Operator reads the REDB resource, talks to the cluster REST API, and creates the database on the cluster. The service rigger then discovers the new database and creates a Kubernetes service endpoint for it, which becomes available to applications.

In the next tutorial, you will learn how to get started with the Redis Enterprise Kubernetes Operator from scratch, including how to perform non-trivial tasks such as backup, restore, horizontal scaling, and much more. Stay tuned!

References

- + \ No newline at end of file diff --git a/create/linux/index.html b/create/linux/index.html index 7285b53141..84b9e34a3d 100644 --- a/create/linux/index.html +++ b/create/linux/index.html @@ -4,7 +4,7 @@ Create a Redis database on Linux | The Home of Redis Developers - + @@ -12,7 +12,7 @@

Create a Redis database on Linux


Profile picture for Prasan Kumar
Author:
Prasan Kumar, Technical Solutions Developer at Redis

From the official Debian/Ubuntu APT Repository

You can install recent stable versions of Redis Stack from the official packages.redis.io APT repository. The repository currently supports Debian Bullseye (11), Ubuntu Xenial (16.04), Ubuntu Bionic (18.04), and Ubuntu Focal (20.04) on x86 processors. Add the repository to the apt index, update it, and install it:

curl -fsSL https://packages.redis.io/gpg | sudo gpg --dearmor -o /usr/share/keyrings/redis-archive-keyring.gpg
echo "deb [signed-by=/usr/share/keyrings/redis-archive-keyring.gpg] https://packages.redis.io/deb $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/redis.list
sudo apt-get update
sudo apt-get install redis-stack-server

From the official RPM Feed

You can install recent stable versions of Redis Stack from the official packages.redis.io YUM repository. The repository currently supports RHEL7/CentOS7 and RHEL8/CentOS8. Add the repository to the repository index, and install the package.

Create the file /etc/yum.repos.d/redis.repo with the following contents:

[Redis]
name=Redis
baseurl=http://packages.redis.io/rpm/rhel7
enabled=1
gpgcheck=1

Then import the GPG key and install the package:

curl -fsSL https://packages.redis.io/gpg > /tmp/redis.key
sudo rpm --import /tmp/redis.key
sudo yum install epel-release
sudo yum install redis-stack-server
- + \ No newline at end of file diff --git a/create/netlify/deploy-docusaurus-to-netlify/index.html b/create/netlify/deploy-docusaurus-to-netlify/index.html index ebb7d9cbc9..a030c1e633 100644 --- a/create/netlify/deploy-docusaurus-to-netlify/index.html +++ b/create/netlify/deploy-docusaurus-to-netlify/index.html @@ -4,7 +4,7 @@ How to Deploy Docusaurus to Netlify in 5 Minutes | The Home of Redis Developers - + @@ -21,7 +21,7 @@ Choose "Deploy site". If you are performing it for the second time, then choose "Clear cache and deploy site" option.

MyImage

Monitor the "Deploy Log" carefully to see if any error messages appear in the log.

MyImage

You should now be able to see your Docusaurus site hosted on port 3000. MyImage

Step 7. Visit your new Docusaurus site on Netlify

Go to "Sites" on the top navigation menu and click on the latest preview build.

MyImage

You will be able to see that Netlify uploads site assets to a content delivery network and makes your site available. MyImage

References

- + \ No newline at end of file diff --git a/create/netlify/getting-started-with-netlify/index.html b/create/netlify/getting-started-with-netlify/index.html index dd6e9f1b51..17db9f29a2 100644 --- a/create/netlify/getting-started-with-netlify/index.html +++ b/create/netlify/getting-started-with-netlify/index.html @@ -4,7 +4,7 @@ Getting Started with Netlify and Redis | The Home of Redis Developers - + @@ -15,7 +15,7 @@ /
  • Once the build process gets completed, Netlify takes the static assets and pushes them to its global CDN for fast delivery.
  • In this tutorial, you will see how to deploy a simple Redis caching app built with Next.js and TailwindCSS to Netlify in 5 minutes.

    Table of Contents

    • Step 1. Setup a Free Redis Enterprise Cloud Account
    • Step 2. Install Netlify CLI
    • Step 3. Clone the GitHub repository
    • Step 4. Login to Netlify via CLI
    • Step 5. Configure Continuous Deployment
    • Step 6. Pushing the changes to GitHub
    • Step 7. Open the Netlify Admin URL
    • Step 8. Add Environment Variable
    • Step 9. Trigger the deployment
    • Step 10. Accessing the app

    Step 1. Setup a free Redis Enterprise Cloud account

    Visit https://developer.redis.com/create/rediscloud/ and create a free Redis Enterprise Cloud account. Select a "Redis Stack" database when you create the Redis Enterprise Cloud database. Once you complete the tutorial, you will be provided with the database endpoint URL and password. Save these for future reference.

    tip

    For a limited time, use TIGER200 to get $200 credits on Redis Enterprise Cloud and try all the advanced capabilities!

    🎉 Click here to sign up

    rediscloud

    Step 2. Install Netlify CLI

    Netlify’s command line interface (CLI) lets you configure continuous deployment directly from the command line. Run the below command to install Netlify CLI on your local laptop:


    npm install netlify-cli -g

    Verify if Netlify is installed or not by running the below command:

     netlify version
    netlify-cli/8.15.3 darwin-x64 node-v14.17.3

    Step 3. Clone the repository

     git clone https://github.com/redis-developer/nextjs-redis-netlify

    Step 4. Login to Netlify via CLI

    To authenticate and obtain an access token using the command line, run the following command to login to your Netlify account:

     netlify login

    This will open a browser window, asking you to log in with Netlify and grant access to Netlify CLI. Once you authenticate, it will ask you to close the window and display the below results:

    Result
      Already logged in via netlify config on your machine

    Run netlify status for account details

    To see all available commands run: netlify help

    Step 5. Configure continuous deployment

    The netlify init command allows you to configure continuous deployment for a new or existing site. It will also allow you to create a netlify.toml file if one doesn't exist.

    netlify init
    Result
    netlify init
    ? What would you like to do? + Create & configure a new site
    ? Team: Redis
    Choose a unique site name (e.g. super-cool-site-by-redisdeveloper.netlify.app) or leave it blank for a random name. You can update the site name later.
    ? Site name (optional): undefined

    Site Created

    Admin URL: https://app.netlify.com/sites/super-cool-site-by-redisdeveloper
    URL: https://super-cool-site-by-redisdeveloper.netlify.app
    Site ID: a70bcfb7-b7b1-4fdd-be8b-5eb3b5dbd404

    Linked to super-cool-site-by-redis-developer in /Users/redisdeveloper/projects/netlify/basic-caching-demo-nodejs/.netlify/state.json
    ? Your build command (hugo build/yarn run build/etc): yarn start
    ? Directory to deploy (blank for current dir): dist
    ? Netlify functions folder: functions
    Adding deploy key to repository...
    Deploy key added!

    Creating Netlify GitHub Notification Hooks...
    Netlify Notification Hooks configured!

    Success! Netlify CI/CD Configured!

    This site is now configured to automatically deploy from github branches & pull requests

    Next steps:

    git push Push to your git repository to trigger new site builds
    netlify open Open the Netlify admin URL of your site

    The above step creates a netlify.toml file with the following content

    netlify.toml
     [build]
    command = "npm run build"
    publish = ".next"

    [[plugins]]
    package = "@netlify/plugin-nextjs"

    Step 6. Pushing the changes to GitHub

    As instructed by Netlify, run the below commands to push the latest changes to GitHub:

    git add .
    git commit -m "Pushing the latest changes"
    git push

    Step 7. Open the Netlify Admin URL

     netlify open --admin

    Step 8. Add Environment Variable for Redis Enterprise Cloud

    Environment Variable

    Step 9. Trigger the deployment

    Click "Trigger deploy" to deploy the site

    MyImage1

    Step 10. Accessing the app

    Click on the deploy URL and you should be able to access the app as shown:

    MyImage

    Try it Yourself

    Deploy to Netlify

    References

    - + \ No newline at end of file diff --git a/create/openshift/index.html b/create/openshift/index.html index 34eb963fc8..65bf96e6fa 100644 --- a/create/openshift/index.html +++ b/create/openshift/index.html @@ -4,7 +4,7 @@ How to deploy Redis Enterprise on Red Hat OpenShift Container Platform via OperatorHub | The Home of Redis Developers - + @@ -17,7 +17,7 @@ Built-in cluster and application monitoring using Prometheus and Grafana dashboards Centralized policy management and enforcement across clusters Built-in security checks for the entire container stack throughout the application life cycle

    Supported Technologies

    OpenShift is fully pluggable and works with a wide range of technologies, including languages such as .NET, Java, Node.js, PHP, Python, Ruby, GoLang, and Perl as well as databases such as MariaDB, MongoDB, MySQL, PostgreSQL, and Redis. In addition, OpenShift is certified with and supports 100+ independent software vendors, including Redis Enterprise.

    What is OperatorHub?

    OperatorHub is the web console interface in OpenShift Container Platform that cluster administrators use to discover, install, and manage operators. In a few clicks, users can subscribe to the software they like via operators and make it available to software developers and platform administrators across clusters or isolated namespace environments using Operator Lifecycle Manager (OLM).

    In this tutorial, you will learn how to deploy Redis Enterprise Cluster and Database on OpenShift using Redis Enterprise Kubernetes Operator.

    Pre-requisites:

    To follow this guide, you should have an OpenShift cluster with 3 or more worker nodes. There are multiple ways to provision an OpenShift cluster; you can follow the installation guide based on your preferred infrastructure or cloud provider. The Redis Enterprise Operator currently supports OpenShift 3.11 and OpenShift 4.x. Visit this page to find all the supported Kubernetes distributions.

    In this guide we are using a self-managed OpenShift Cluster on Google Cloud Platform.

    Step 1. Login to Red Hat Openshift Container Platform(OCP)

    You can log in to OCP via the CLI as a user with the cluster-admin role.

    $ oc login
    Authentication required for https://api.sumit.openshift.demo.redislabs.com:6443 (openshift)
    Username:
    Password:
    Login successful.

    You have access to 68 projects, the list has been suppressed. You can list all projects with 'oc projects'.

    Step 2. Verify the status of nodes

    Ensure that all the nodes are in “Ready” status.

    $ oc get nodes
    NAME STATUS ROLES AGE VERSION
    demoopenshift-9x5xq-infra-b-h28tc Ready infra,worker 86m v1.22.3+e790d7f
    demoopenshift-9x5xq-infra-b-wrfgg Ready infra,worker 85m v1.22.3+e790d7f
    demoopenshift-9x5xq-master-0 Ready master 103m v1.22.3+e790d7f
    demoopenshift-9x5xq-master-1 Ready master 103m v1.22.3+e790d7f
    demoopenshift-9x5xq-master-2 Ready master 103m v1.22.3+e790d7f
    demoopenshift-9x5xq-worker-b-6jwx2 Ready worker 97m v1.22.3+e790d7f
    demoopenshift-9x5xq-worker-b-9pxhd Ready worker 95m v1.22.3+e790d7f

    Step 3. Create a New Project

    Redis Enterprise can only be deployed in a user's custom project. Hence, let's create a new project.

    $ oc new-project myproject
    Now using project "myproject" on server "https://api.sumit.openshift.demo.redislabs.com:6443".

    You can add applications to this project with the 'new-app' command. For example, try:

    oc new-app ruby~https://github.com/sclorg/ruby-ex.git

    to build a new example application in Ruby. Or use kubectl to deploy a simple Kubernetes application:

    kubectl create deployment hello-node --image=gcr.io/hello-minikube-zero-install/hello-node

    Step 4. Clone the Redis Enterprise on Kubernetes Github repository

    $ git clone https://github.com/RedisLabs/redis-enterprise-k8s-docs
    $ cd redis-enterprise-k8s-docs/

    Step 5. Apply the SCC configuration

    The SCC (security context constraint) YAML defines the security context constraints for our project. It is composed of custom security settings and strategies that allow the Redis Enterprise Operator and Cluster Pods to run on OpenShift. We strongly recommend that you not change anything in this YAML file.

    kind: SecurityContextConstraints
    apiVersion: security.openshift.io/v1
    metadata:
      name: redis-enterprise-scc
    allowPrivilegedContainer: false
    allowedCapabilities:
      - SYS_RESOURCE
    runAsUser:
      type: MustRunAs
      uid: 1001
    fsGroup:
      type: MustRunAs
      ranges:
        - min: 1001
          max: 1001
    seLinuxContext:
      type: RunAsAny

    Apply the configuration from the file “scc.yaml”, which is available in the “openshift” folder, as shown below:

     $  oc apply -f scc.yaml
    securitycontextconstraints.security.openshift.io/redis-enterprise-scc configured

    Step 6. Provide the operator permissions for Redis Enterprise Operator and Cluster pods

     $ oc adm policy add-scc-to-user redis-enterprise-scc system:serviceaccount:myproject:redis-enterprise-operator
    $ oc adm policy add-scc-to-user redis-enterprise-scc system:serviceaccount:myproject:rec

    Step 7. Deploy Redis Enterprise Operator

    If you want to perform this operation via UI, follow the below steps:

    Login to OpenShift console with admin privileges.

    image

    Click on “OperatorHub” under Operators section.

    image

    This will open up a list of operators with various categories. Search for “Redis” and click “Redis Enterprise” (the one without the “Marketplace” tag) to proceed.

    image

    Click “Install” to start the installation process and then select the required channel, namespace and approval strategy in the next screen.

    image

    Wait for the installation to be completed.

    image

    Click on “Events” to see the streaming events in order to troubleshoot or check the progress of the installation.

    image

    Step 8. Create Redis Enterprise Cluster

    Once the Redis Enterprise Operator is installed, create a Redis Enterprise Cluster instance using the RedisEnterpriseCluster (REC) custom resource.

    RedisEnterpriseCluster is the schema for the redisenterpriseclusters API. From the installed Redis Enterprise Operator, click “Create RedisEnterpriseCluster”, select the “YAML” tab, then paste the sample YAML file below:

    image

    apiVersion: app.redislabs.com/v1
    kind: RedisEnterpriseCluster
    metadata:
      name: rec
      namespace: myproject
    spec:
      redisEnterpriseNodeResources:
        limits:
          cpu: 4000m
          memory: 4Gi
        requests:
          cpu: 4000m
          memory: 4Gi
      persistentSpec:
        enabled: true
        storageClassName: standard
      redisEnterpriseImageSpec:
        imagePullPolicy: IfNotPresent
        repository: redislabs/redis
        versionTag: 6.2.8-64.rhel7-openshift
      servicesRiggerSpec:
        databaseServiceType: load_balancer
        serviceNaming: redis-port
      nodes: 3
      uiServiceType: LoadBalancer
      username: demo@redis.com

    For configuring any additional parameters for the Redis Enterprise Cluster custom resource, visit this page.

    Wait until the status shows “Succeeded” under Redis Enterprise Operator.

    image

    Step 9. Access Redis Enterprise Cluster Admin Console

    To access the Redis Enterprise Cluster admin console, you will need credentials. Follow the steps below to get the credentials via the “rec” secret resource.

    image

    Under the "Data" section, you can reveal username and password.

    Next, Click on “Services” and choose “rec-ui” under the services list.

    image

    Click on "Services" and look for External Load-balancer IP.

    image

    Step 10. From the web browser, access the rec-ui external IP at https://<EXTERNAL-IP>:8443 to reach the Redis Enterprise Cluster Admin Console.

    image

    To log into Redis Enterprise Cluster, use the credentials that you extracted above. By now, you should be able to see three nodes of the Redis Enterprise cluster.

    image

    Step 11. Create Redis Enterprise Database

    RedisEnterpriseDatabase is the schema for the redisenterprisedatabases API. To create a REDB, navigate to Installed Operators > Redis Enterprise Operator > Redis Enterprise Database and click “Create RedisEnterpriseDatabase” to start the creation process using the REDB custom resource. You can click on “YAML” and paste the sample YAML file below:

apiVersion: app.redislabs.com/v1alpha1
kind: RedisEnterpriseDatabase
metadata:
  name: redb
  namespace: myproject
spec:
  redisEnterpriseCluster:
    name: rec
  memorySize: 1GB
  tlsMode: enabled

    For configuring any additional parameters for the Redis Enterprise Database custom resource, visit this page.

    Once the REDB creation is done, you should see the status as “active”.

    Image

Finally, navigate to the Redis Enterprise Cluster admin console to verify that the Redis Enterprise database has been created.

    Image

Click on the newly created database and then click the configuration option to see the overall details.

    Image

    Navigate back to the OpenShift console and verify the service object created for the Redis Database.

    Image

    Select “Routes” from the left-hand column and create a new route object for this database service with the sample YAML file below:

kind: Route
apiVersion: route.openshift.io/v1
metadata:
  name: redb
  labels:
    app: redis-enterprise
    redis.io/cluster: rec
spec:
  host: redb-re-test.apps.sumit.openshift.demo.redislabs.com
  to:
    kind: Service
    name: redis-12119-load-balancer
    weight: 100
  port:
    targetPort: redis
  tls:
    termination: passthrough
    insecureEdgeTerminationPolicy: None

    Note the Host name 'my-db-hostname' from the created route object.

    image

    Note the database password "my-db-password" from the database secret object with name “db-name-redb”.

    image

    Get the default CA certificate from the redis-enterprise-node container on any of the Redis Enterprise pods and paste the output in a file called “proxy_cert.pem”.

$ oc exec -it <pod-name> -c redis-enterprise-node -- cat /etc/opt/redislabs/proxy_cert.pem

    Run the following openssl command, substituting your own values for 'my-db-hostname'.

    $  openssl s_client -connect <my-db-hostname>:443 -crlf \
    -CAfile ./proxy_cert.pem -servername <my-db-hostname>

Authenticate to the database with the password and enter the PING command. If you are connected to the database, you will receive PONG back, as shown below:

    ...
    ...
    ...
    Verify return code: 0 (ok)
    ---
    auth 'my-db-password'
    +OK

    PING
    +PONG

    You can use the code below to test your access with Python, substituting your own values for 'my-db-hostname' and 'file-path'.

import redis

try:
    r = redis.StrictRedis(
        host='my-db-hostname',
        port=443,
        password='my-db-password',
        ssl=True,
        ssl_ca_certs='file-path')
    print(r.info())
    # Test the connection
    print('Testing redis by setting a key and value..')
    r.set('hello', 'world')
    value = r.get('hello')
    print(value)
except Exception as e:
    print(e)

    Your output should look something like this:

    {'redis_version': '6.0.13', 'redis_git_sha1': 0, 'redis_git_dirty': 0, 'redis_build_id': 0, 'redis_mode': 'standalone', 'os': 'Linux 4.18.0-305.30.1.el8_4.x86_64 x86_64'
    ...
    ...
    ...

    Testing redis by setting a key and value..
    b'world'

In our next tutorial, we will learn how to deploy a sample real-time chat application powered by Go and Redis on OpenShift.

    References:


    Getting Started with Portainer and Redis


    Profile picture for Ryan Gray
    Author:
    Ryan Gray, Technical Consultant at Portainer.io

    Redis is an in-memory data structure store, used as a distributed, in-memory key–value database, cache and message broker, with optional durability. Redis supports different kinds of abstract data structures, such as strings, lists, maps, sets, sorted sets, HyperLogLogs, bitmaps, streams, and spatial indices. ~Wikipedia

    portainer and redis

    You can’t travel far in the modern software world without finding that you need an in-memory data store. Today, the answer to “Which data store?” is often Redis. Also today, the answer to “How to Redis?” can be Portainer.

    In this blog we'll take you through using Portainer to set up Redis in three common scenarios.

    Scenario 1: Kubernetes

    Many organizations are either already using Kubernetes in some capacity or they are on a journey to adopt it, so let’s start there.

    First, log into Portainer and select a Kubernetes environment to manage. Then, in the navigation menu on the left, click "Namespaces", and then click the "Add namespace with form" button.

• Give the namespace a name, in this case “redis”.
    • Turn off "Resource assignment" (only for the purpose of simplicity for this demo).
    • Click the “Create namespace” button.

    create_namespace

    Now that we have a namespace for Redis to operate in, let’s install it.

    In the navigation menu on the left, click “Helm”. If you haven’t already done so, add the Bitnami charts repository by typing https://charts.bitnami.com/bitnami into the “Additional repositories” form, and then click “Add repository”.

    helm

    Once the Bitnami charts repository has been added, you should see a list of Charts on this page. Find "Redis" and click on it.

Note: you will see redis-cluster listed as an option. The redis-cluster Helm chart configures a six-node cluster: three masters and three slaves. The redis Helm chart we will use configures a much simpler three-node cluster: one master and two slaves. There are a number of other differences between these two Helm charts; for a complete list, Bitnami has a good description here.

    redis-in-helm-list

    Next, scroll to the top of the page to configure Redis for deployment.

    • Select “redis” in the Namespace dropdown.
    • Enter “redis” for the Name.
• Click “Show custom values”. I am going to expose Redis via NodePort 31000. I picked port 31000 because I know that ports 31000-31010 are open to my cluster. To do this, I will set service.type to NodePort and service.nodePorts.redis to 31000. As you can see in the screenshot below, these can currently be found on lines 431 and 441 in the Helm chart values.
    • Click the “Install” button.

    redis-helm-custom-values

    When it’s finished, you can head to the Applications page by clicking Applications in the navigation menu on the left. When Kubernetes is finished bringing up Redis, you will see the Status as “Ready”.

    redis-ready

    Note that what is deployed is a STATEFULSET application, which means it persists data. You can see the volumes which have been created (which use the default storage class of your system) by clicking "Volumes".

    volumes

See that each pod has its own copy/replica of the DB content, and note that it defaults to 8GB in size. If you need to change this, it's on line 409 in the values file of the Helm deployment.

    change_volumes_size

    And that’s it. The only thing left to do is test it. Before we do that, we’re going to take a short detour. There are two facts about the Helm chart install of Redis that you should know. First, Redis will come up requiring authentication to connect to it. Second, a random password was created during installation. To find it, in Portainer click on “ConfigMaps & Secrets” in the navigation menu on the left. Find the secret named “redis” and click on it. The password that you’ll need to authenticate with is the value of the redis-password key.

    find-redis-password

    With that in hand, you can test that your Redis server is running. My typical rudimentary test of a Redis deployment is to connect to it with the redis-cli from my laptop and increment an arbitrary key. You’ll see that the client is connecting to our NodePort here and using the password we found in our Secrets.

k8s-test

In a shocking plot twist, this worked.
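
As a sketch of that test (the node IP and password are placeholders; 31000 is the NodePort we configured above):

$ redis-cli -h <node-ip> -p 31000 -a <redis-password> --no-auth-warning INCR hits
(integer) 1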

    Scenario 2: Docker Swarm

    Long live Docker Swarm! There is still plenty of Swarm in the wild and Portainer is managing a lot of it. Let’s bring up Redis in Swarm.

    For the purposes of this demo, we will use a Portainer App Template to deploy Redis, and this assumes you are using Portainer’s provided App Template Repo. You can check that under “Settings”.

    settings

    Now that you know you are good to go,

    • Click "App Templates" in the navigation menu on the left.
    • Find and click on Redis Cluster.

    redis-cluster

    • Fill in the name, in this case "redis".
    • Provide a SECURE password for Redis.
    • Click the “Deploy the stack” button.

    deploy-stack

You will be taken to the “Stacks list” page and will see the new stack named “redis”. Click on “redis” to see information about this stack, like this:

    stack-info

    To test, expand one of the services that you see on the stack details page (above). Then click on the container’s “exec console” icon.

    console-access-icon

    • Click on the “Connect” button to start the shell.

    console-connect

Once the console opens, you can test the Redis deployment. In case it’s difficult to see in the screenshot, the command used to connect to a Redis node is redis-cli -h redis-node-1 -c -a my-password-here --no-auth-warning

    swarm-test

    Scenario 3: Docker & Docker Swarm - Can I just have a container?

    Sometimes I just want a Redis container, not a whole situation. Just a quick, unsophisticated Redis container. Here’s how to get that done in Portainer.

    • Click "App Templates" in the navigation menu on the left.
    • Toggle the "Show container templates" switch to on.

    show-container-templates-toggle

    • Find and click on Redis.
    • Give the application a name, in this case "redis".
    • Click on “Show advanced options”.
• Set the port to map. In this example, the Docker host's port 6379 is forwarded to the container's port 6379, Redis’s default port for most communications. (The equivalent plain Docker command is sketched after this list.)
    • Click on the “Deploy the container” button.

    deploy-container
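
For reference, this template deployment is roughly equivalent to the following plain Docker command (a sketch; the official redis image with its default tag is assumed):

$ docker run -d --name redis -p 6379:6379 redis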

    That’s it. You can test in the same way as before.

    container-container

    Three different scenarios - Three easy Redis deployments using Portainer


    Getting started with Redis Functions


    Profile picture for Elena Kolevska
    Author:
    Elena Kolevska, Technical Enablement Manager, EMEA at Redis

    The most impactful addition to Redis version 7.0 is Redis Functions - a new programmability option, improving on scripts by adding modularity, reusability, and better overall developer experience.

    Functions are, in contrast to scripts, persisted in the .rdb and .aof files as well as automatically replicated to all the replicas, which makes them a first-class citizen of Redis.

Redis has the capability of supporting multiple execution engines, so in a future release we’ll be able to write Redis Functions in Lua, JavaScript, and more languages, but at the moment (Redis v7.0) the only supported language is Lua.

A common pain point for developers is maintaining a consistent view of data entities through a logical schema. Redis Functions are ideally suited to solving this problem, and in this tutorial we will demonstrate just that. We’ll create a library with two functions: the first will automatically set _created_at and _updated_at timestamps for hash keys, and the second will simply update the _updated_at timestamp without changing the other fields, simulating the Unix “touch” utility. Let's go!

    Environment setup

    First, let’s set up a working environment with Redis 7. You can follow the installation instructions in the guides below, according to your operating system:

    Alternatively, you can spin up a Docker container with Redis Stack:

    $ docker run -p 6379:6379 --name redis-7.0 -it --rm redis/redis-stack:7.0.0-RC4
    note

    In the rest of this tutorial we’ll use the $ character to indicate that the command needs to be run on the command prompt and redis-cli> to indicate the same for a redis-cli prompt.

    Warm-Up

Now that we have our Redis server running, we can create a file named mylib.lua and in it create a function named hset that receives the keys and arguments we pass on the command line as parameters.

    Functions in Redis are always a part of a library, and a single library can have multiple functions.

    For starters, let's create a simple function that returns "Hello Redis 7.0" and save it in the mylib.lua file.

    #!lua name=mylib

local function hset(keys, args)
  return "Hello Redis 7.0"
end

The first line specifies that we want to use the Lua engine to run this function and that the library's name is mylib. The name is the library identifier and we will use it every time we need to update it.

Next, we need to register this function so it can be accessed through the Functions API. In the registration we specify that the function hset can be called with the name my_hset:

    redis.register_function('my_hset', hset)

    The full code, so far, is:

    #!lua name=mylib
local function hset(keys, args)
  return "Hello Redis 7.0"
end

    redis.register_function('my_hset', hset)

    Before we can call the function on the command line, we need to load and register it with the Redis server:

    $ cat /path/to/mylib.lua | redis-cli -x FUNCTION LOAD

    Finally, let’s run the function we registered:

    redis-cli> FCALL my_hset 1 foo

    You should see the greeting "Hello Redis 7.0" as a response.
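
You can also verify that the library was loaded with FUNCTION LIST (available from Redis 7.0), which should show mylib along with its registered function my_hset:

redis-cli> FUNCTION LIST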

    Maintaining a consistent view of data entities through a logical schema

We're now ready to start working on the requirement. First, let's implement the part that adds an _updated_at timestamp:

#!lua name=mylib

local function hset(keys, args)
  local hash = keys[1] -- Get the key name
  local time = redis.call('TIME')[1] -- Get the current time from the Redis server

  -- Add the current timestamp to the arguments that the user passed to the function, stored in `args`
  table.insert(args, '_updated_at')
  table.insert(args, time)

  -- Run HSET with the updated argument list
  return redis.call('HSET', hash, unpack(args))
end

redis.register_function('my_hset', hset)

After you update the code, you will have to reload the library in the Redis server, using the REPLACE argument to specify that you want to overwrite the previous version:

    $ cat /path/to/mylib.lua | redis-cli -x FUNCTION LOAD REPLACE

    If you try to create and update a hash through our function now, you will see that a timestamp is automatically added to it:

    redis-cli> FCALL my_hset 1 foo k1 v1 k2 v2
    3

    redis-cli> HGETALL foo
    1) "k1"
    2) "v1"
    3) "k2"
    4) "v2"
    5) "_updated_at"
    6) "1643581494"

    If we try to update the same key, we will see that the _updated_at timestamp got updated too:

    redis-cli> FCALL my_hset 1 foo k4 v4
    1

    redis-cli> HGETALL foo
    1) "k1"
    2) "v1"
    3) "k2"
    4) "v2"
    5) "_updated_at"
    6) "1643581580"
    7) "k4"
    8) "v4"

    Now let's add the logic that checks if the key is being created or updated, and adds the _created_at timestamp accordingly:

#!lua name=mylib

local function hset(keys, args)
  local hash = keys[1] -- Get the key name
  local time = redis.call('TIME')[1] -- Get the current time from the Redis server

  -- Check if the key exists and if not - add a `_created_at` timestamp
  local exists = redis.call('exists', hash)
  if exists==0 then
    table.insert(args, '_created_at')
    table.insert(args, time)
  end

  -- Add the current timestamp to the arguments that the user passed to the function, stored in `args`
  table.insert(args, '_updated_at')
  table.insert(args, time)

  -- Run HSET with the updated argument list
  return redis.call('HSET', hash, unpack(args))
end

redis.register_function('my_hset', hset)

    Reload the library:

    $ cat /path/to/mylib.lua | redis-cli -x FUNCTION LOAD REPLACE

    And try to create a new key:

    redis-cli> FCALL my_hset 1 bar k1 v1 k2 v2
    4

    redis-cli> HGETALL bar
    1) "k1"
    2) "v1"
    3) "k2"
    4) "v2"
    5) "_updated_at"
    6) "1643581710"
    7) "_created_at"
    8) "1643581710"

Both _created_at and _updated_at timestamps were added. If we update the key, we will see that the _updated_at timestamp changes, while _created_at stays the same:

    redis-cli> FCALL my_hset 1 bar k4 v4
    1

    redis-cli> HMGET bar _created_at _updated_at
    1) "1643581710"
    2) "1643581992"

    The second requirement was to implement a function that will allow us to update the _updated_at timestamp without updating any other fields. For that, we'll have to create a new function in our library:

local function touch(keys, args)
  local time = redis.call('TIME')[1]
  return redis.call('HSET', keys[1], '_updated_at', time)
end

    And we should also add the function registration:

    redis.register_function('my_touch', touch)

    The full code will now look like this:

#!lua name=mylib

local function hset(keys, args)
  local hash = keys[1] -- Get the key name
  local time = redis.call('TIME')[1] -- Get the current time from the Redis server

  local exists = redis.call('exists', hash)
  if exists==0 then
    table.insert(args, '_created_at')
    table.insert(args, time)
  end

  -- Add the current timestamp to the arguments that the user passed to the function, stored in `args`
  table.insert(args, '_updated_at')
  table.insert(args, time)

  -- Run HSET with the updated argument list
  return redis.call('HSET', hash, unpack(args))
end

local function touch(keys, args)
  local time = redis.call('TIME')[1]
  return redis.call('HSET', keys[1], '_updated_at', time)
end

redis.register_function('my_hset', hset)
redis.register_function('my_touch', touch)

    Reload the updated library:

    $ cat /path/to/mylib.lua | redis-cli -x FUNCTION LOAD REPLACE

    And try running the new function and confirm that the _updated_at timestamp has indeed changed:

    redis-cli> FCALL my_touch 1 bar
    0

    redis-cli> HMGET bar _created_at _updated_at
    1) "1643581710"
    2) "1643582276"

    Thinking ahead

One of the basic rules of software development is that you cannot rely on user input, so let’s make sure we don’t do that. If a user creates a string named my_string and tries to run our hset function on it, they will get an error:

    redis-cli> SET my_string hello
    OK

    redis-cli> FCALL my_hset 1 my_string k1 v1
    (error) WRONGTYPE Operation against a key holding the wrong kind of value script: my_hset, on @user_function:17.

    Let’s handle this error by adding a type check:

if exists==1 and redis.call('TYPE', hash)["ok"] ~= 'hash' then
  local error = 'The key ' .. hash .. ' is not a hash'
  redis.log(redis.LOG_WARNING, error);
  return redis.error_reply(error)
end

    The complete code:

#!lua name=mylib

local function hset(keys, args)
  local hash = keys[1] -- Get the key name
  local time = redis.call('TIME')[1] -- Get the current time from the Redis server

  local exists = redis.call('exists', hash)
  if exists==0 then
    table.insert(args, '_created_at')
    table.insert(args, time)
  end

  if exists==1 and redis.call('TYPE', hash)["ok"] ~= 'hash' then
    local error = 'The key ' .. hash .. ' is not a hash'
    redis.log(redis.LOG_WARNING, error);
    return redis.error_reply(error)
  end

  -- Add the current timestamp to the arguments that the user passed to the function, stored in `args`
  table.insert(args, '_updated_at')
  table.insert(args, time)

  -- Run HSET with the updated argument list
  return redis.call('HSET', hash, unpack(args))
end

local function touch(keys, args)
  local time = redis.call('TIME')[1]
  return redis.call('HSET', keys[1], '_updated_at', time)
end

redis.register_function('my_hset', hset)
redis.register_function('my_touch', touch)

    If we reload the library and try again, we'll get an error with a helpful message:

    $ cat /path/to/mylib.lua | redis-cli -x FUNCTION LOAD REPLACE

    redis-cli> FCALL my_hset 1 my_string
    (error) The key my_string is not a hash

    Refactoring

We can see some code repetition in our library, namely the type check and fetching the timestamp in both functions. That's a good opportunity for code reuse. Let's extract this logic into functions of its own:

local function get_time()
  return redis.call('TIME')[1]
end

local function is_not_hash(key_name)
  if redis.call('TYPE', key_name)['ok'] ~= 'hash' then
    return 'The key ' .. key_name .. ' is not a hash.'
  end

  return nil
end

These functions are only going to be called by our two existing functions and not from the outside, which is why we don't need to register them. The refactored code now looks like this:

#!lua name=mylib

-- Get the current time from the Redis server
local function get_time()
  return redis.call('TIME')[1]
end

local function is_not_hash(key_name)
  if redis.call('TYPE', key_name)['ok'] ~= 'hash' then
    return 'The key ' .. key_name .. ' is not a hash.'
  end

  return nil
end

local function hset(keys, args)
  local hash = keys[1] -- Get the key name
  local time = get_time()

  local exists = redis.call('exists', hash)
  if exists==0 then
    table.insert(args, '_created_at')
    table.insert(args, time)
  end

  local hash_error = is_not_hash(hash)
  if exists==1 and hash_error ~= nil then
    return redis.error_reply(hash_error)
  end

  -- Add the current timestamp to the arguments that the user passed to the function, stored in `args`
  table.insert(args, '_updated_at')
  table.insert(args, time)

  -- Run HSET with the updated argument list
  return redis.call('HSET', hash, unpack(args))
end

local function touch(keys, args)
  local hash = keys[1]

  local hash_error = is_not_hash(hash)
  if hash_error ~= nil then
    return redis.error_reply(hash_error)
  end

  return redis.call('HSET', hash, '_updated_at', get_time())
end

redis.register_function('my_hset', hset)
redis.register_function('my_touch', touch)

    Using Function flags

In this step, we'll get familiar with Function flags - pieces of information that describe the circumstances under which a function is allowed to run. Redis currently supports five flags:

    • no-writes - this flag indicates that the script only reads data but never writes.
    • allow-oom - this flag allows a script to execute even when the server is out of memory (OOM).
    • allow-stale - this flag enables running the script against a stale replica.
    • no-cluster - this flag doesn't allow the script to run in a clustered database.
    • allow-cross-slot-keys - this flag allows a script to access keys from multiple slots.

    To best illustrate why function flags are useful we'll work with a simple example that gets the basic info and favourite colours of a user. Save the following snippet in a file named get_user.lua:

#!lua name=mynewlib

local function get_user(keys, args)
  local hash = keys[1] -- Get the key name

  local user = redis.call('HGETALL', hash)
  local user_colours = redis.call('SMEMBERS', hash .. ':colours')

  table.insert(user, #user+1, 'colours')
  table.insert(user, #user+1, user_colours)

  return user
end

redis.register_function('get_user', get_user)

If we try to execute this function with FCALL_RO - the read-only variant of FCALL - we will get an error, even though it only performs read operations. To declare that the function is read-only, we need to use the no-writes flag in its registration:

    $ cat /path/to/get_user.lua | redis-cli -x FUNCTION LOAD

    redis-cli> FCALL_RO get_user 1 user:1
    (error) ERR Can not execute a script with write flag using *_ro command.

#!lua name=mynewlib

local function get_user(keys, args)
  local hash = keys[1] -- Get the key name

  local user = redis.call('HGETALL', hash)
  local user_colours = redis.call('SMEMBERS', hash .. ':colours')

  table.insert(user, #user+1, 'colours')
  table.insert(user, #user+1, user_colours)

  return user
end

redis.register_function{
  function_name='get_user',
  callback=get_user,
  flags={ 'no-writes' }
}

    Finally, this will give us the expected result:

    $ cat /path/to/get_user.lua | redis-cli -x FUNCTION LOAD REPLACE

    redis-cli> FCALL_RO get_user 1 user:1
    1) "email"
    2) "foo@bar.com"
    3) "colours"
    4) 1) "green"
    2) "red"
    3) "blue"

That's it, you now know how to write, load and execute Redis Functions. Congratulations!

For more information on Redis Functions, check the Redis Functions documentation, and to learn more about the Lua API, check the Redis Lua API Reference.

Introduction to Redis Stack

Select the region of your choice and then click "Let's start free".

    tip

    If you want to create a custom database with your preferred name and type of Redis, click "Create a custom database".

    create database

    Step 3. Listing the database details

    Once fully activated, you will see the database endpoints as shown below:

    verify database

    Step 4. Connecting to the database via RedisInsight

RedisInsight is a visual tool that provides both GUI- and CLI-based interactions with your Redis database, and much more besides when developing your Redis-based application. It is a fully-featured desktop GUI client that lets you design, develop and optimize your Redis application. It works with any cloud provider as long as you run it on a host with network access to your cloud-based Redis server, making it easy to discover cloud databases and configure connection details with a single click. It can automatically add Redis Enterprise Software and Redis Enterprise Cloud databases.

You can install Redis Stack on your local system to get the RedisInsight GUI tool up and running. Ensure that you have the brew package manager installed on your Mac.

     brew tap redis-stack/redis-stack
    brew install --cask redis-stack
     ==> Installing Cask redis-stack-redisinsight
    ==> Moving App 'RedisInsight-preview.app' to '/Applications/RedisInsight-preview.app'
    🍺 redis-stack-redisinsight was successfully installed!
    ==> Installing Cask redis-stack
    🍺 redis-stack was successfully installed!

    You can easily find the Applications folder on your Mac with Finder. Search "RedisInsight-v2" and click the icon to bring up the Redis Desktop GUI tool.

    Step 5. Add Redis database

    access redisinsight

    Step 6. Enter Redis Enterprise Cloud details

    Add the Redis Enterprise cloud database endpoint, port and password.

    access redisinsight

    Step 7. Verify the database under RedisInsight dashboard

    database details

    Step 8. Try Redis Stack tutorials

    In this tutorial, we will go through an example of a bike shop. We will show the different capabilities of Redis Stack.

    Choose "Redis Stack" from the left sidebar menu.

    access json workbench

    Step 9. Store and Manage JSON

Let's examine the query for creating a single bike. Click the "Create a bike" button:

    access json keys

It will display a JSON.SET command with model, brand, price, type, specs and description details. bikes:1 is the name of the Redis key that the JSON will be stored in.
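
For illustration, such a command has the following shape (the field values here are placeholders, not the tutorial's actual data):

redis-cli> JSON.SET bikes:1 $ '{"model": "Hyperion", "brand": "Velorim", "price": 844, "type": "Enduro bikes"}'
OK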

    Step 10. Accessing part of a stored JSON document

    Click "Get specific fields" to access parts of a stored JSON document as shown in the following image:

    access json keys
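
With Redis JSON, JSONPath expressions let you fetch only the parts you need. As a sketch against the key created above (the paths are illustrative):

redis-cli> JSON.GET bikes:1 $.model $.price

This returns a JSON string keyed by each requested path.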

    Next Steps

    Storing and querying JSON documents

    Follow this tutorial to learn how to store and query JSON documents using Redis Stack.

    Learn how to perform full-text search using Redis Stack.

    Probabilistic data structures

    Follow this tutorial to learn how to implement low latency and compact Probabilistic data structures using Redis Stack.

    Storing and querying time series data

    Learn how to store and query time series data using Redis Stack.


    Create database using Redis Enterprise Cloud


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    Redis Enterprise Cloud is a fully managed cloud service by Redis. Built for modern distributed applications, Redis Enterprise Cloud enables you to run any query, simple or complex, at sub-millisecond performance at virtually infinite scale without worrying about operational complexity or service availability. With modern probabilistic data structures and extensible data models, including Search, JSON, Graph, and Time Series, you can rely on Redis as your data-platform for all your real-time needs.

    Step 1. Create a free Cloud account

    Create your free Redis Enterprise Cloud account. Once you click on “Get Started”, you will receive an email with a link to activate your account and complete your signup process.

    tip

    For a limited time, use TIGER200 to get $200 credits on Redis Enterprise Cloud and try all the advanced capabilities!

    🎉 Click here to sign up

    Step 2. Create Your database

    Choose your preferred cloud vendor. Select the region and then click "Let's start free" to create your free database automatically.

    tip

If you want to create a custom database with your preferred name and type of Redis, click the "Create a custom database" option shown in the image.

    create database

    Step 3. Verify the database details

You will be provided with a public endpoint URL and "Redis Stack" as the type of database, with the list of modules that come by default.

    verify database
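
As a quick sanity check, you can connect with redis-cli (a sketch; substitute the endpoint, port, and password from your database details page):

$ redis-cli -u redis://default:<password>@<public-endpoint>:<port> PING
PONG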

    Step 4. Install RedisInsight

RedisInsight is a visual tool that provides both GUI- and CLI-based interactions with your Redis database, and much more besides when developing your Redis-based application. It is a fully-featured desktop GUI client that lets you design, develop and optimize your Redis application. It works with any cloud provider as long as you run it on a host with network access to your cloud-based Redis server, making it easy to discover cloud databases and configure connection details with a single click. It can automatically add Redis Enterprise Software and Redis Enterprise Cloud databases.

You can install Redis Stack on your local system to get the RedisInsight GUI tool up and running. Ensure that you have the brew package manager installed on your Mac.

     brew tap redis-stack/redis-stack
    brew install --cask redis-stack
      ==> Installing Cask redis-stack-redisinsight
    ==> Moving App 'RedisInsight-preview.app' to '/Applications/RedisInsight-preview.app'
    🍺 redis-stack-redisinsight was successfully installed!
    ==> Installing Cask redis-stack
    🍺 redis-stack was successfully installed!

    Go to Applications and click "RedisInsight-v2" to bring up the Redis Desktop GUI tool.

    Step 5. Add Redis database

    access redisinsight

    Step 6. Enter Redis Enterprise Cloud details

    Add the Redis Enterprise cloud database endpoint, port and password.

    access redisinsight

    Step 7. Verify the database under RedisInsight dashboard

    database details

    Step 8. Try Redis Stack tutorials

    In this tutorial, we will go through an example of a bike shop. We will show the different capabilities of Redis Stack.

    Choose "Redis Stack" in the left sidebar.

    access json workbench

    Step 9. Store and Manage JSON

Let's examine the query for creating a single bike. Click the "Create a bike" button:

    access json keys

    It will display a JSON.SET command with model, brand, price, type, specs and description details. bikes:1 is the name of the Redis key where the JSON is stored.

    Step 10. Accessing parts of a stored JSON document

    Click "Get specific fields" to access part of a stored JSON document as shown in the following diagram:

    access json keys

    Next Steps

    Redis Launchpad

    Getting Started with Vercel and Redis


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    Vercel is a popular static web hosting serverless platform for frontend developers. The platform allows developers to host websites and web services, deploy instantly, and scale automatically with minimal configuration.

    Vercel is the preferred platform to host Next.js-based web applications. It allows you to deploy serverless functions that take an HTTP request and provide a response. You can use serverless functions to handle user authentication, form submission, database queries, custom Slack commands, and more.

Vercel integrates well with popular tools, such as GitHub, GitLab, Lighthouse, Doppler, and Divjoy. Node.js, Go, Python, and Ruby are the leading official runtimes supported by Vercel.

    vercel

    Features of Vercel

    • Vercel is focused on the build and deployment aspects of the JAMstack approach.
    • The Vercel API provides full control over the Vercel platform, exposed as simple HTTP service endpoints over SSL.
    • All endpoints live under the URL https://api.vercel.com and follow the REST architecture.
    • Vercel provides custom domains to deploy your code on the live server (vercel.app as the suffix in the domain).
• Vercel lets you choose the framework for the repository you wish to deploy, whether Node.js, React, Gatsby, or Next.js (a full-stack React serverless framework that integrates seamlessly with Vercel).
    • Vercel integrates with a GitHub repository for automatic deployments upon commits.

    In this tutorial, you will learn how to deploy a Node.js based Redis chat application using Vercel in just 5 minutes.

    Table of Contents

    • Step 1. Set up a free Redis Enterprise Cloud account
    • Step 2. Install Vercel CLI
    • Step 3. Log in to your Vercel Account
    • Step 4. Clone your GitHub repository
    • Step 5. Create a vercel.json file
    • Step 6. Set up environment variables
    • Step 7. Deploy the Node.js app
    • Step 8. Access your app

    Step 1. Set up a free Redis Enterprise Cloud account

Visit developer.redis.com/create/rediscloud/ and create a free Redis Enterprise Cloud account. Once you complete the setup, you will be provided with the database endpoint URL and password. Save them for future reference.

    tip

    For a limited time, use TIGER200 to get $200 credits on Redis Enterprise Cloud and try all the advanced capabilities!

    🎉 Click here to sign up

    alt_text

    Step 2. Install Vercel CLI

    npm i -g vercel

    vercel -v
    Vercel CLI 23.1.2
    23.1.2

    Step 3. Log in to your Vercel account

    The vercel login command allows you to log in to your Vercel account through Vercel CLI.

    vercel login
    Vercel CLI 23.1.2
    > Log in to Vercel github
    > Success! GitHub authentication complete for xx@xx.com
    Congratulations! You are now logged in. In order to deploy something, run `vercel`.
    💡 Connect your Git Repositories to deploy every branch push automatically (https://vercel.link/git).

    Once Vercel gets connected to your GitHub account, it displays your public repositories. Let us clone https://github.com/redis-developer/basic-redis-chat-app-demo-nodejs to the local repository.

    Step 4. Clone the GitHub repository

    The complete source code of the Redis Chat application is hosted here. React and Socket.IO are used for building the frontend while Node.js and Redis power the backend. Redis is used mainly as a database to keep the user/messages data and for sending messages between connected servers.

    git clone https://github.com/redis-developer/basic-redis-chat-app-demo-nodejs

    Step 5. Create a vercel.json for Node.js app

If you run the vercel init command, it will list various frameworks, but Node.js is not among them, so you will need to create a vercel.json file as shown below:

{
  "version": 2,
  "builds": [
    {
      "src": "./index.vercel.js",
      "use": "@vercel/node"
    }
  ],
  "routes": [
    {
      "src": "/(.*)",
      "dest": "/"
    }
  ]
}

    Step 6. Set up environment variables

    The vercel env command is used to manage environment variables under a Project, providing functionality to list, add, remove, and pull.

    Let us first set up environment variables.

     vercel env add
    Vercel CLI 23.1.2
    ? What's the name of the variable? REDIS_ENDPOINT_URI
    ? What's the value of REDIS_ENDPOINT_URI? redis-XXXX.c110-qa.us-east-1-1-1.ec2.qa-cloud.redislabs.com:XXX
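
The application also needs the database password, which you add the same way (the value below is a placeholder):

vercel env add
Vercel CLI 23.1.2
? What's the name of the variable? REDIS_PASSWORD
? What's the value of REDIS_PASSWORD? <your-database-password>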

    Listing the environment variables:

    vercel env ls
    Vercel CLI 23.1.2
    > Environment Variables found in Project basic-redis-chat-app-demo-nodejs [684ms]

    name value environments created
    REDIS_PASSWORD Encrypted Production, Preview, Development 2d ago
    REDIS_ENDPOINT_URL Encrypted Production, Preview, Development 2d ago

    Step 7. Deploy the Node.js app

    When you run a vercel command in a directory for the first time, Vercel CLI needs to know which scope and Project you want to deploy your directory to. You can choose to either link an existing project or create a new one.

    vercel
    Vercel CLI 23.1.2
    ? Set up and deploy "~/projects/10feb/basic-redis-chat-app-demo-nodejs"? [Y/n] y
    ? Which scope do you want to deploy to? redis-developer
    ? Found project "redis-developer/basic-redis-chat-app-demo-nodejs". Link to it? [Y/n] y
    🔗 Linked to redis-developer/basic-redis-chat-app-demo-nodejs (created .vercel)
    🔍 Inspect: https://vercel.com/redis-developer/basic-redis-chat-app-demo-nodejs/5KZydRNsXwnjRxDYa65x4Ak8KwZT [4s]
    ✅ Preview: https://basic-redis-chat-app-demo-nodejs-redis-developer.vercel.app [copied to clipboard] [27s]
    📝 To deploy to production (basic-redis-chat-app-demo-nodejs.vercel.app), run `vercel --prod`
    ❗️ Due to `builds` existing in your configuration file, the Build and Development Settings defined in your Project Settings will not apply. Learn More: https://vercel.link/unused-build-settings

    Once the deployment process has completed, a new .vercel directory will be added to your directory. The .vercel directory contains both the organization and project ID of your project.

    The "project.json" file contains:

    • The ID of the Vercel project that you linked ("projectId")

    • The ID of the user or team your Vercel project is owned by ("orgId")

    note

    Vercel CLI automatically detects the framework you are using and offers default project settings accordingly.

    Step 8. Accessing the app

    Run the following command to deploy the Redis chat app to the Prod environment.

    vercel --prod
    Vercel CLI 23.1.2
    🔍 Inspect: https://vercel.com/redis-developer/basic-redis-chat-app-demo-nodejs/GoRdy7LKxqhBfJNW8hSvvFLQC6EN [2s]
    ✅ Production: https://basic-redis-chat-app-demo-nodejs.vercel.app [copied to clipboard] [14s]


By now, you will be able to log in to the chat app and start chatting.

    alt_text

The chat server works as a basic REST API which involves keeping the session and handling the user state in the chat rooms (besides the WebSocket/real-time part). A WebSocket/real-time server is instantiated, which listens for the following events:

    alt_text

If you want to know how the chat app works internally, refer to this detailed blog tutorial.

    References


    How to Install Redis on Windows


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

You can run Redis on Windows 10 using Windows Subsystem for Linux (WSL2). WSL2 is a compatibility layer for running Linux binary executables natively on Windows 10 and Windows Server 2019. With WSL2, developers can run a GNU/Linux environment (including command-line tools, utilities, and applications) directly on Windows.

    Follow these instructions to run a Redis database on Microsoft Windows 10.

    Step 1: Turn on Windows Subsystem for Linux

    In Windows 10, Microsoft replaced Command Prompt with PowerShell as the default shell. Open PowerShell as Administrator and run this command to enable Windows Subsystem for Linux (WSL):

     Enable-WindowsOptionalFeature -Online -FeatureName Microsoft-Windows-Subsystem-Linux

    Reboot Windows after making the change — note that you only need to do this once.

    Step 2: Launch Microsoft Windows Store

     start ms-windows-store:

    Then search for Ubuntu, or your preferred distribution of Linux, and download the latest version.

    Step 3: Install Redis server

    Installing Redis is simple and straightforward. The following example works with Ubuntu (you'll need to wait for initialization and create a login upon first use):

     sudo apt-add-repository ppa:redislabs/redis
    sudo apt-get update
    sudo apt-get upgrade
    sudo apt-get install redis-server
    note

    The sudo command may or may not be required based on the user configuration of your system.

    Step 4: Restart the Redis server

    Restart the Redis server as follows:

     sudo service redis-server restart

    Step 5: Verify if your Redis server is running

    Use the redis-cli command to test connectivity to the Redis database.

     $ redis-cli
    127.0.0.1:6379> set user:1 "Jane"
    127.0.0.1:6379> get user:1
    "Jane"
    note

By default, Redis provides 16 databases (indexes 0-15); you can change this with the databases NUMBER directive in redis.conf.
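
For example, to allow 32 databases you would set the following in redis.conf and then restart the server:

databases 32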

    Step 6: Stop the Redis Server

     sudo service redis-server stop

    Next Steps

    How to run Redis GUI tool on Windows

    Windows logo

    Follow this tutorial in order to run RedisInsight on Windows

    Model Redis data in your .NET Applications

    redis OM logo

    Learn how to connect to a Redis database with Redis OM Dotnet

    References

    Redis University

    Check out this video if you want to see Redis on Windows 10 Home Edition in action.


    Redis DEVcember


    Profile picture for Suze Shardlow
    Author:
    Suze Shardlow, Developer Community Manager at Redis
    Profile picture for Simon Prickett
    Author:
    Simon Prickett, Principal Developer Advocate at Redis

    What's it all About?

    We're excited to announce DEVcember, a month-long festival of live online events and fun challenges, showcasing Redis and our community!

Join us on Twitch or YouTube for a daily 15-20 minute live stream, Monday to Friday, where we'll introduce Redis concepts, share projects that you can contribute to, and have some fun banter. On Saturdays and Sundays, we'll give you a mini challenge to complete.

    Don't worry if you miss a stream - we'll post them on YouTube and embed them in the schedule below.

    The fun begins Wednesday 1st December 2021 at 9am UTC.

    Live Schedule

    We'll be on the Redis channel on Twitch and YouTube each working day in December… times will vary and we'll update the schedule below daily, revealing each day's topic as we go.

Date/Time | Topic
Wed 1 Dec, 9am UTC | Welcome to DEVcember, Get Started with Redis in the Cloud
Thu 2 Dec, 10.30am UTC | Up and Running with RedisInsight
Fri 3 Dec, 8pm UTC | The Redis List goes on and on!
Sat 4 / Sun 5 Dec | First weekend hands-on exercise (take the challenge on GitHub)
Mon 6 Dec, 4:30pm UTC | Let's try out Redis OM for Python
Tue 7 Dec, 5pm UTC | The Scoop on Big O Notation
Wed 8 Dec, 4:30pm UTC | Get! Set! Go!
Thu 9 Dec, 10:30am UTC | Have I Seen You Before? Introducing Bloom Filters
Fri 10 Dec, 6pm UTC | You Can (Mostly) Count on Hyperloglog!
Sat 11 / Sun 12 Dec | Second weekend hands-on exercise (take the challenge on GitHub)
Mon 13 Dec, 4pm UTC | Sort it out! All about Sorted Sets
Tue 14 Dec, 4:45pm UTC | What's the Score? Top K with Redis Bloom!
Wed 15 Dec, 10am UTC | Seek and You May Find… Introducing Redis Search! (Part 1)
Wed 15 Dec, 10am UTC | Seek and You May Find… Introducing Redis Search! (Part 2)
Thu 16 Dec, 3:45pm UTC | Introducing Redis OM for Node.js
Fri 17 Dec, 4pm UTC | Object Mapping and More! Redis OM for .NET
Sat 18 / Sun 19 Dec | Third weekend hands-on exercise (take the challenge on GitHub)
Mon 20 Dec, 1pm UTC | Don't Cross the (Redis) Streams!
Tue 21 Dec, 5pm UTC | What's the deal with Pub/Sub?
Wed 22 Dec, 5:15pm UTC | Spring into Redis OM! (Redis OM for Java/Spring Framework)
Thu 23 Dec, 5pm UTC | Finding your way with Redis Geospatial!
Fri 24 Dec, 9:15am UTC | Herding Cats with Redis JSON

    Meet the Team

    Your regular presenters are:

    Suze Shardlow, Developer Community Manager

    Suze leads developer community at Redis. She’s a software engineer, published tech author and event MC who has spoken at several global tech conferences. When she’s not talking databases and putting together content, she loves crafting!

    Justin Castilla, Developer Advocate

    Justin is a Developer Advocate at Redis. He has helped to produce several courses at Redis University and has created numerous videos for the Redis YouTube channel.

    Simon Prickett, Developer Advocacy Manager

    Simon Prickett is the Developer Advocacy Manager at Redis. He began his career writing C++ for Hewlett-Packard Labs, and has subsequently held senior roles with companies ranging from startups to enterprises including Capital One, USA Today, and New Zealand’s Customs Service. Away from professional life Simon enjoys traveling, cycling, and building devices with microcontrollers.


    We'll also feature guest appearances from other members of the Redis developer relations team!

    Join the Fun!

    Be sure to follow us on Twitch to be notified when we're online! We'd also love to see you on the Redis Discord, where there's a dedicated channel for all things DEVcember. This is where you can chat with us throughout the day and get help and information about the fun Redis commands / coding challenges we'll post for you to try on weekends.

    If you're on Twitter, use hashtag #RedisDEVcember to join the conversation.

    Learn More

    To learn more about Redis and get yourself a certificate that you can add to your LinkedIn profile, check out our free online courses at Redis University which are available all year round. There's something for everyone!


    C and Redis


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    Find tutorials, examples and technical articles that will help you to develop with Redis and C.

    Getting Started

    In order to use Redis with C, you need a C Redis client. For your first steps with C and Redis, this article will show how to use the recommended library: hiredis.

Hiredis is a minimalistic C client library for the Redis database. It is minimalistic because it adds only minimal support for the protocol, but at the same time it uses a high-level printf-like API, making it much higher level than its minimal code base (and lack of explicit bindings for every Redis command) would suggest.

    Step 1. Install the pre-requisites

Version 1.0.0 marks the first stable release of Hiredis. Follow the steps below to install the prerequisite packages needed to compile the latest version of hiredis.

     brew install gcc make

Run the command below to start the Redis server:

     redis-server

    Step 2. Install and compile hiredis

wget https://github.com/redis/hiredis/archive/master.zip
unzip master.zip
cd hiredis-master
make
make install

Step 3. Save the following C code as redistest.c:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <hiredis/hiredis.h>

int main (int argc, char **argv) {
    redisReply *reply;
    redisContext *c;

    /* Connect to the local Redis server started earlier (default port 6379) */
    c = redisConnect("127.0.0.1", 6379);
    if (c->err) {
        printf("error: %s\n", c->errstr);
        return 1;
    }

    /* PINGs */
    reply = redisCommand(c, "PING %s", "Hello World");
    printf("RESPONSE: %s\n", reply->str);
    freeReplyObject(reply);

    redisFree(c);
}

    Step 4. Compile the code

     gcc redistest.c -o redistest -I /usr/local/include/hiredis -lhiredis

    Step 5. Test the code

     ./redistest
    RESPONSE: Hello World
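
Beyond PING, the same printf-style redisCommand API covers ordinary commands. A minimal sketch, reusing the connection c from the program above:

/* SET then GET a key, freeing each reply when done */
reply = redisCommand(c, "SET %s %s", "greeting", "hello");
freeReplyObject(reply);
reply = redisCommand(c, "GET %s", "greeting");
printf("greeting = %s\n", reply->str); /* prints: greeting = hello */
freeReplyObject(reply);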

    More C Clients Resources

    • hiredis-cluster - C client library for Redis Cluster

    • libredis - A C based general low-level PHP extension and client library for Redis, focusing on performance, generality and efficient parallel communication with multiple Redis servers.

    • hiredispool - Provides connection pooling and auto-reconnect for hiredis. It is also minimalistic and easy to do customization.


    Deno and Redis


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    With over 80,000 stars and 670+ contributors, Deno is a popular modern runtime for JavaScript and TypeScript. It is built on V8, an open-source JavaScript engine developed by the Chromium Project for Google Chrome and Chromium web browsers.

    deno

    Features of Deno

    • Deno is secure by default. It executes code in a sandbox environment, disallowing runtime access to the underlying filesystem, environment variables and scripts.
    • Deno supports both JavaScript and TypeScript out of the box.
    • Deno ships as a single executable with no dependencies.
    • Comes with built-in utilities such as a dependency inspector (deno info) and a code formatter (deno fmt).

    Getting Started

deno.land/x is a hosting service for Deno scripts. It caches releases of open source modules stored on GitHub and serves them at one easy-to-remember domain. These modules contain small scripts that demonstrate use of Deno and its standard modules.

    The basic format of code URLs is

    https://deno.land/x/IDENTIFIER@VERSION/FILE_PATH

    Example:

    https://deno.land/std@0.126.0/examples

    In order to use Redis with Deno you will need a Deno Redis client. In the following sections, we will demonstrate the use of an experimental implementation of a Redis client for Deno.

    Step 1. Set up a free Redis Enterprise Cloud account

Visit developer.redis.com/create/rediscloud/ and create a free Redis Enterprise Cloud account. Once you complete the setup, you will be provided with the database endpoint URL and password. Save them for future reference.

    tip

    For a limited time, use TIGER200 to get $200 credits on Redis Enterprise Cloud and try all the advanced capabilities!

    🎉 Click here to sign up

    Database details

    Step 2. Get Deno

    brew install deno

    Step 3. Verify if Deno is properly installed

    deno -V
    deno 1.19.0

Step 4. Create a file named redis.ts with the following content

    The following code creates a connection to Redis using Deno:

import { connect } from 'https://deno.land/x/redis/mod.ts';

const redis = await connect({
  hostname: 'redis-18386.c110-qa.us-east-1-1.ec2.qa-cloud.redislabs.com',
  port: 18386,
  password: 'XXXX',
});

const ok = await redis.set('foo', 'bar');
const foo = await redis.get('foo');
console.log(foo); // prints "bar"

Replace the values of hostname, port, and password to match those of your own Redis database.

    Step 5. Executing the script

Deno can grab scripts from multiple sources: a filename, a URL, or '-' to read the file from stdin. You can run a JavaScript or TypeScript program by executing deno run.

    deno run --allow-net redis.ts

When you run the script, the value of foo is printed. You can verify that the commands reached the server by running the monitor command:

    redis-15692.c264.ap-south-1-1.ec2.cloud.redislabs.com:15692> monitor
    OK
    1646536310.435577 [0 122.171.165.94:50193] "AUTH" "(redacted)"
    1646536310.475578 [0 122.171.165.94:50193] "SET" "foo" "bar"
    1646536310.511578 [0 122.171.165.94:50193] "GET" "foo"

    Additional references:

    Redis Launchpad

    How to add a basic API Cache to your ASP.NET Core application

Redis is synonymous with caching, and for good reason: Redis is fast and easy to get up and running with, and it does an excellent job as a cache.

    There are two big reasons to use a cache over the source of truth.

    1. Time - caches are much faster
    2. Cost - sometimes going to a source of truth has a monetary cost. For example, API endpoints sometimes charge per request. This means that we want to limit unnecessary requests to a particular endpoint.

    In the second case, unnecessary requests to the API endpoint are wasteful and can add up to a high financial cost to the application over time. Therefore, in this tutorial, we will look at caching the results of API requests to prevent us from having to make round trips to an API.

    For our example, we will use the US National Weather Service's (NWS) Weather API - which is free and requires no authentication beyond a user-agent. We will build an API to get a weather forecast based on latitude and longitude using ASP.NET Core.

    Prerequisites

    Start Redis

Let's start by starting Redis; for development purposes, you can just use Docker:

    docker run -p 6379:6379 redis

    If you are getting ready to deploy to production, you may want to make use of the Redis Cloud

    Create the Project

    Next, we'll create the ASP.NET Core API project using the .NET CLI.

    dotnet new webapi -n BasicWeatherCacheApp

    Then we'll cd into the BasicWeatherCacheApp directory that we just created, and we will add the StackExchange.Redis package to the project:

    dotnet add package StackExchange.Redis

    Add Redis Cache to ASP.NET Core app

Open up the Program.cs file. This is where the services are all defined and injected into the project. Add the following to register the StackExchange.Redis ConnectionMultiplexer with the ASP.NET Core application, as well as an HttpClient:

    builder.Services.AddSingleton<IConnectionMultiplexer>(ConnectionMultiplexer.Connect("localhost"));
    builder.Services.AddHttpClient();

    Create Data Structures to Hold Results

    The resulting structure from the NWS is a bit verbose, but we will endeavor to just capture the future forecasts for a particular area.

We'll create two structures: the first will contain the actual forecast, and the second will hold the list of forecasts from a given request, as well as the time it took to accumulate them. For the first, we'll use the default WeatherForecast class that's created with the template; open up WeatherForecast.cs and replace its contents with:

    public class WeatherForecast
    {
    [JsonPropertyName("number")]
    public int Number { get; set; }

    [JsonPropertyName("name")]
    public string Name { get; set; }

    [JsonPropertyName("startTime")]
    public DateTime StartTime { get; set; }

    [JsonPropertyName("endTime")]
    public DateTime EndTime { get; set; }

    [JsonPropertyName("isDayTime")]
    public bool IsDayTime { get; set; }

    [JsonPropertyName("temperature")]
    public int Temperature { get; set; }

    [JsonPropertyName("temperatureUnit")]
    public string? TemperatureUnit { get; set; }

    [JsonPropertyName("temperatureTrend")]
    public string? TemperatureTrend { get; set; }

    [JsonPropertyName("windSpeed")]
    public string? WindSpeed { get; set; }

    [JsonPropertyName("windDirection")]
    public string? WindDirection { get; set; }

    [JsonPropertyName("shortForecast")]
    public string? ShortForecast { get; set; }

    [JsonPropertyName("detailedForecast")]
    public string? DetailedForecast { get; set; }
    }

    Next, create the file ForecastResult.cs and add the following to it:

    public class ForecastResult
    {
    public long ElapsedTime { get; }
    public IEnumerable<WeatherForecast> Forecasts { get; }

    public ForecastResult(IEnumerable<WeatherForecast> forecasts, long elapsedTime)
    {
    Forecasts = forecasts;
    ElapsedTime = elapsedTime;
    }
    }

    Dependency Injection Into the Weather Forecast Controller

Now that we've set up our app, we need to configure our controller. First, open Controllers/WeatherForecastController.cs (this controller is automatically created along with the template) and add the following code to inject what we need into it.

    private readonly HttpClient _client;
    private readonly IDatabase _redis;

    public WeatherForecastController(HttpClient client, IConnectionMultiplexer muxer)
    {
    _client = client;
    _client.DefaultRequestHeaders.UserAgent.Add(new ProductInfoHeaderValue("weatherCachingApp","1.0") );
    _redis = muxer.GetDatabase();
    }

    Query the API

To query the Weather API for the forecast at a particular latitude and longitude, we need to go through a two-step process, because there's no direct API for querying the forecast by geolocation. Instead, every geolocation is assigned to a particular monitoring office, and each office has a 2D grid that a specific latitude and longitude map onto. Fortunately, there's a points API endpoint to which you can pass your latitude and longitude; it returns the office responsible for that point and the point's x/y grid coordinates. You then query the forecast endpoint for those grid coordinates at that office and pull out the forecasted periods. The following accomplishes all this.

    private async Task<string> GetForecast(double latitude, double longitude)
    {
    var pointsRequestQuery = $"https://api.weather.gov/points/{latitude},{longitude}"; //get the URI
    var result = await _client.GetFromJsonAsync<JsonObject>(pointsRequestQuery);
    var gridX = result["properties"]["gridX"].ToString();
    var gridY = result["properties"]["gridY"].ToString();
var gridId = result["properties"]["gridId"].ToString();
    var forecastRequestQuery = $"https://api.weather.gov/gridpoints/{gridId}/{gridX},{gridY}/forecast";
    var forecastResult = await _client.GetFromJsonAsync<JsonObject>(forecastRequestQuery);
    var periodsJson = forecastResult["properties"]["periods"].ToJsonString();
    return periodsJson;
    }

    Write the Forecast Action

Given the multiple API calls, it's clear why a cache is critical for our application. These forecasts do not update very often, only every 1-3 hours, so making two back-to-back API requests for the same forecast is wasteful in both time and money. In the case of this API there's no financial cost per request, but a commercial API will often have per-request costs. When we write this action, we will check the cache first; if the cache contains the relevant forecast, we will return it. Otherwise, we will hit the API, save the result, and set the key to expire. We'll time the whole operation and then reply with the result and the time it took.

    [HttpGet(Name = "GetWeatherForecast")]
    public async Task<ForecastResult> Get([FromQuery] double latitude, [FromQuery] double longitude)
    {
    string json;
    var watch = Stopwatch.StartNew();
    var keyName = $"forecast:{latitude},{longitude}";
    json = await _redis.StringGetAsync(keyName);
    if (string.IsNullOrEmpty(json))
    {
    json = await GetForecast(latitude, longitude);
    var setTask = _redis.StringSetAsync(keyName, json);
    var expireTask = _redis.KeyExpireAsync(keyName, TimeSpan.FromSeconds(3600));
    await Task.WhenAll(setTask, expireTask);
    }

    var forecast =
    JsonSerializer.Deserialize<IEnumerable<WeatherForecast>>(json);
    watch.Stop();
    var result = new ForecastResult(forecast, watch.ElapsedMilliseconds);

    return result;
    }

    Run the App

All that's left to do now is run the app. Run dotnet run in your console, open https://localhost:PORT_NUMBER/swagger/index.html, and use the GUI to send a request. Alternatively, you can use cURL to send the request. The first time you send a new latitude and longitude, you'll notice that the request takes quite a while, ~1 second. When you make the request again and it hits the cache, the time drops dramatically to ~1-5 ms.

    Resources

    • Source code for this demo is located in GitHub
• More documentation for the StackExchange.Redis library is located on its docs site

    How to implement a Fixed Window Rate Limiting app using ASP.NET Core & Redis


    Profile picture for Steve Lorello
    Author:
    Steve Lorello, Senior Field Engineer at Redis

    In this tutorial, we will build an app that implements basic fixed-window rate limiting using Redis & ASP.NET Core.

    Prerequisites

    Startup Redis

Before we begin, start up Redis. For this example, we'll use the Redis docker image:

     docker run -dp 6379:6379 redis

    Create Project

    In your terminal, navigate to where you want the app to live and run:

     dotnet new webapi -n FixedRateLimiter --no-https

    Change directory to FixedRateLimiter and run the below command:

    dotnet add package StackExchange.Redis

Open the FixedRateLimiter.csproj file in Visual Studio or Rider (or open the folder in VS Code), and in the Controllers folder add an API controller called RateLimitedController. When all this is complete, RateLimitedController.cs should look like the following:

      namespace FixedRateLimiter.Controllers
    {
    [ApiController]
    [Route("api/[controller]")]
    public class RateLimitedController : ControllerBase
    {

    }
    }

    Initialize The Multiplexer

To use Redis, we're going to initialize an instance of the ConnectionMultiplexer from StackExchange.Redis. To do so, go to the ConfigureServices method inside of Startup.cs and add the following line:

     services.AddSingleton<IConnectionMultiplexer>(ConnectionMultiplexer.Connect("localhost"));

    Inject the ConnectionMultiplexer

    In RateLimitedController.cs inject the ConnectionMultiplexer into the controller and pull out an IDatabase object from it with the following:

     private readonly IDatabase _db;
    public RateLimitedController(IConnectionMultiplexer mux)
    {
    _db = mux.GetDatabase();
    }

    Add a Simple Route

    We will add a simple route that we will Rate Limit; it will be a POST request route on our controller. This POST request will use Basic auth - this means that each request is going to expect a header of the form Authorization: Basic <base64encoded> where the base64encoded will be a string of the form apiKey:apiSecret base64 encoded, e.g. Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==. This route will parse the key out of the header and return an OK result.

     [HttpPost("simple")]
    public async Task<IActionResult> Simple([FromHeader]string authorization)
    {
    var encoded = string.Empty;
    if(!string.IsNullOrEmpty(authorization)) encoded = AuthenticationHeaderValue.Parse(authorization).Parameter;
    if (string.IsNullOrEmpty(encoded)) return new UnauthorizedResult();
    var apiKey = Encoding.UTF8.GetString(Convert.FromBase64String(encoded)).Split(':')[0];
    return Ok();
    }
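As an aside, here is a minimal sketch (not part of the tutorial's code) of how a client could produce that header value; the foobar:password pair is just the example credential used below:

var headerValue = Convert.ToBase64String(Encoding.UTF8.GetBytes("foobar:password"));
// headerValue is "Zm9vYmFyOnBhc3N3b3Jk", sent as: Authorization: Basic Zm9vYmFyOnBhc3N3b3Jk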

With that in place, run the project with dotnet run; if you issue a POST request to http://localhost:5000/api/RateLimited/simple - with apiKey foobar and password password - you will get a 200 OK response back.

    You can use this cURL request to elicit that response:

     curl -X POST -H "Content-Length: 0" --user "foobar:password" http://localhost:5000/api/RateLimited/simple

    Fixed Window Rate Limiting Lua Script

We are going to build a fixed window rate limiting script. A fixed window rate limiter limits the number of requests in a particular window of time. In our example, we will limit the number of requests to a specific route for a specific API key. So, for example, if the apiKey foobar hits our route api/ratelimited/simple at 12:00:05, and we have a 60-second window in which no more than ten requests may be sent, we need to:

1. Format a key from our info, e.g. Route:ApiKey:time-window - in our case, this would be api/ratelimited/simple:foobar:12:00
2. Increment the current value of that key
3. Set the expiration for that key to 60 seconds
4. If the current value of the key is within the maximum number of requests allowed, return 0 (not rate limited)
5. Otherwise, return 1 (rate limited)

The issue we need to contend with here is that this rate limiting requires atomicity across all our commands (e.g. between when we get and increment the key, we don't want anyone else coming in and hitting it). Because of this, we will run everything on the server through a Lua script. There are two ways to write this Lua script. The traditional way, where you drive everything off of keys and arguments, looks like the following:

     local key = KEYS[1]
    local max_requests = tonumber(ARGV[1])
    local expiry = tonumber(ARGV[2])
    local requests = redis.call('INCR',key)
    redis.call('EXPIRE', key, expiry)
    if requests < max_requests then
    return 0
    else
    return 1
    end

Alternatively, StackExchange.Redis supports a more readable mode of scripting that lets you name arguments to your script, with the library taking care of filling in the appropriate items at execution time. That mode of scripting, which we will use here, produces this script:

     local requests = redis.call('INCR',@key)
    redis.call('EXPIRE', @key, @expiry)
    if requests < tonumber(@maxRequests) then
    return 0
    else
    return 1
    end

    Loading the Script

To run a Lua script with StackExchange.Redis, you need to prepare the script and run it. Consequently, add a new file Scripts.cs to the project, and in that file add a new static class called Scripts; this will contain a constant string holding our script and a getter property that prepares the script for execution.

     using StackExchange.Redis;
    namespace FixedRateLimiter
    {
    public static class Scripts
    {
    public static LuaScript RateLimitScript => LuaScript.Prepare(RATE_LIMITER);

    private const string RATE_LIMITER = @"
    local requests = redis.call('INCR',@key)
    redis.call('EXPIRE', @key, @expiry)
    if requests < tonumber(@maxRequests) then
    return 0
    else
    return 1
    end
    ";
    }
    }

    Executing the Script

With the script set up, all that's left to do is build our key, run the script, and check the result. We already extracted the apiKey earlier, so we will use that, the request path, and the current time to create our key. Then, we will run ScriptEvaluateAsync to execute the script and use its result to determine whether to return a 429 or our JSON result. Add the following just ahead of the return in our Simple method:

     var script = Scripts.RateLimitScript;
    var key = $"{Request.Path.Value}:{apiKey}:{DateTime.Now:hh:mm}";
    var res = await _db.ScriptEvaluateAsync(script, new {key = new RedisKey(key), expiry = 60, maxRequests = 10});
    if ((int) res == 1)
    return new StatusCodeResult(429);

    Our Simple route's code should look like this:

     [HttpPost("simple")]
    public async Task<IActionResult> Simple([FromHeader]string authorization)
    {
    var encoded = string.Empty;
    if(!string.IsNullOrEmpty(authorization)) encoded = AuthenticationHeaderValue.Parse(authorization).Parameter;
    if (string.IsNullOrEmpty(encoded)) return new UnauthorizedResult();
    var apiKey = Encoding.UTF8.GetString(Convert.FromBase64String(encoded)).Split(':')[0];
    var script = Scripts.RateLimitScript;
    var key = $"{Request.Path.Value}:{apiKey}:{DateTime.UtcNow:hh:mm}";
    var res = await _db.ScriptEvaluateAsync(script, new {key = new RedisKey(key), expiry = 60, maxRequests = 10});
    if ((int) res == 1)
    return new StatusCodeResult(429);
    return new JsonResult(new {key});
    }

    Now, if we start our server back up with dotnet run and try running the following command:

     for n in {1..21}; do echo $(curl -s -w " HTTP %{http_code}, %{time_total} s" -X POST -H "Content-Length: 0" --user "foobar:password" http://localhost:5000/api/ratelimited/simple); sleep 0.5; done

You will see some of your requests return a 200 and at least one request return a 429. How many of each depends on the time at which you start sending requests. Recall that the requests are time-boxed into single-minute windows, so if you cross into the next minute in the middle of the 21 requests, the counter will reset. Hence, you should expect somewhere between 10 and 20 OK results and between 1 and 11 429 results. The response should look something like this:

     HTTP 200, 0.002680 s
    HTTP 200, 0.001535 s
    HTTP 200, 0.001653 s
    HTTP 200, 0.001449 s
    HTTP 200, 0.001604 s
    HTTP 200, 0.001423 s
    HTTP 200, 0.001492 s
    HTTP 200, 0.001449 s
    HTTP 200, 0.001551 s
    {"status":429,"traceId":"00-16e9da63f77c994db719acff5333c509-f79ac0c862c5a04c-00"} HTTP 429, 0.001803 s
    {"status":429,"traceId":"00-3d2e4e8af851024db121935705d5425f-0e23eb80eae0d549-00"} HTTP 429, 0.001521 s
    {"status":429,"traceId":"00-b5e824c9ebc4f94aa0bda2a414afa936-8020a7b8f2845544-00"} HTTP 429, 0.001475 s
    {"status":429,"traceId":"00-bd6237c5d0362a409c436dcffd0d4a7a-87b544534f397247-00"} HTTP 429, 0.001549 s
    {"status":429,"traceId":"00-532d64033c54a148a98d8efe1f9f53b2-b1dbdc7d8fbbf048-00"} HTTP 429, 0.001476 s
    {"status":429,"traceId":"00-8c210b1c1178554fb10aa6a7540d3488-0fedba48e38fdd4b-00"} HTTP 429, 0.001606 s
    {"status":429,"traceId":"00-633178f569dc8c46badb937c0363cda8-ab1d1214b791644d-00"} HTTP 429, 0.001661 s
    {"status":429,"traceId":"00-12f01e448216c64b8bfe674f242a226f-d90ff362926aa14e-00"} HTTP 429, 0.001858 s
    {"status":429,"traceId":"00-63ef51cee3bcb6488b04395f09d94def-be9e4d6d6057754a-00"} HTTP 429, 0.001622 s
    {"status":429,"traceId":"00-80a971db60fdf543941e2457e35ac2fe-3555f5cb9c907e4c-00"} HTTP 429, 0.001710 s
    {"status":429,"traceId":"00-f718734ae0285343ac927df617eeef92-91a49e127f2e4245-00"} HTTP 429, 0.001582 s
    {"status":429,"traceId":"00-9da2569cce4d714480dd4f0edc0506d2-8a1ce375b1a9504f-00"} HTTP 429, 0.001629 s

    Resources

    Redis Launchpad

    Configurable Sliding Window Rate Limiting Middleware for Redis & ASP.NET Core

Let's consider the case (which is probably most cases) where we have multiple endpoints we want to rate limit. It doesn't make an awful lot of sense to embed rate limiting in the logic of the routes themselves; instead, it's better to have something that intercepts requests and checks whether the request is rate-limited before moving on to the appropriate endpoint. To accomplish this, we'll build middleware for just this purpose, and with some light configuration work, that middleware will handle a configurable set of limits.

    Prerequisites

    Startup Redis

Before we begin, start up Redis. For this example, we'll use the Redis docker image:

    docker run -p 6379:6379 redis

    Create Project

    In your terminal, navigate to where you want the app to live and run:

    dotnet new webapi -n RateLimitingMiddleware --no-https

Change directory into the RateLimitingMiddleware folder and run the command dotnet add package StackExchange.Redis.

    Open RateLimitingMiddleware.csproj in Rider, Visual Studio, or open the folder in VS Code. Then, in the Controllers folder, add an API controller called RateLimitedController. When all this is complete, RateLimitedController.cs should look like the following:

namespace RateLimitingMiddleware.Controllers
    {
    [ApiController]
    [Route("api/[controller]")]
    public class RateLimitedController : ControllerBase
    {
    }
    }

    Create Configuration Object

Now it's time to dig into the logic behind this middleware. The first thing we ought to do is decide on the shape of the configuration we will use to configure our middleware. Configuration objects will take the following form in our application configuration:

    {
    "RedisRateLimits": [
    {
    "Path": "/api/ratelimited/limited",
    "Window": "30s",
    "MaxRequests": 5
    },
    {
    "PathRegex": "/api/*",
    "Window": "1d",
    "MaxRequests": 1000
    }
    ]
    }

In other words, we have four parameters.

Parameter Name | Description
Path | Literal path to be rate-limited; if the path matches completely, it will trigger a rate limit check
PathRegex | Path regex to be rate-limited; if the path matches, it will trigger a rate limit check
Window | The sliding window to rate limit on; should match the pattern ([0-9]+(s|m|d|h))
MaxRequests | The maximum number of requests allowable over the period

    And those parameters are going to be stored under the configuration node RedisRateLimits in our configuration.

    Build Config Object

The configuration objects we'll use will contain the logic of the rule, plus some parsing logic to extract the timeout from the window pattern. So we'll create a new class called RateLimitRule. In this class, we'll add a regex to do the pattern matching for our window:

    public class RateLimitRule
    {

    }

    Time Regex

    private static readonly Regex TimePattern = new ("([0-9]+(s|m|d|h))");

    Time Unit Enum

Then we'll create an enum in which to store the unit half of the window size:

    private enum TimeUnit
    {
    s = 1,
    m = 60,
    h = 3600,
    d = 86400
    }

    Parse Time

We are going to measure time windows in seconds (as that is the most natural unit for Redis), so we need a method to convert our time window to seconds:

    private static int ParseTime(string timeStr)
    {
    var match = TimePattern.Match(timeStr);
    if (string.IsNullOrEmpty(match.Value))
    throw new ArgumentException("Rate limit window was not provided or was not " +
    "properly formatted, must be of the form ([0-9]+(s|m|d|h))");
    var unit = Enum.Parse<TimeUnit>(match.Value.Last().ToString());
    var num = int.Parse(match.Value.Substring(0, match.Value.Length - 1));
    return num * (int) unit;
    }

    Add properties

Next, we'll add the properties of this class. So that we don't have to repeat the parsing computation, we'll cache the result in a separate private _windowSeconds field:

    public string Path { get; set; }
    public string PathRegex { get; set; }
    public string Window { get; set; }
    public int MaxRequests { get; set; }
    internal int _windowSeconds = 0;
internal string PathKey => string.IsNullOrEmpty(Path) ? PathRegex : Path;
    internal int WindowSeconds
    {
    get
    {
    if (_windowSeconds < 1)
    {
    _windowSeconds = ParseTime(Window);
    }
    return _windowSeconds;
    }
    }

    Match Path

    Finally, we'll perform the pattern matching against the path:

    public bool MatchPath(string path)
    {
    if (!string.IsNullOrEmpty(Path))
    {
    return path.Equals(Path, StringComparison.InvariantCultureIgnoreCase);
    }
    if (!string.IsNullOrEmpty(PathRegex))
    {
    return Regex.IsMatch(path, PathRegex);
    }
    return false;
    }

    Writing our Lua Script

    We need to write a Lua script that will consider all the rules applicable to a particular user on a specific endpoint. We'll use sorted sets to check the rate limits for each rule and user. On each request, it will take each applicable rule and:

1. Check the current time
2. Trim off entries that fall outside the window
3. Check whether admitting another request would violate any rule; if it would, return 1
4. Otherwise, for each applicable rule, add a new entry to the sorted set with a score of the current time in seconds and a member name of the current time in microseconds, and set the key's expiration
5. Return 0

As we don't know the number of rules ahead of time, it's impossible to use the StackExchange.Redis library's named-argument script preparation, but we can still use a Lua script to accomplish this.

    local current_time = redis.call('TIME')
    local num_windows = ARGV[1]
    for i=2, num_windows*2, 2 do
    local window = ARGV[i]
    local max_requests = ARGV[i+1]
    local curr_key = KEYS[i/2]
    local trim_time = tonumber(current_time[1]) - window
    redis.call('ZREMRANGEBYSCORE', curr_key, 0, trim_time)
    local request_count = redis.call('ZCARD',curr_key)
    if request_count >= tonumber(max_requests) then
    return 1
    end
    end
    for i=2, num_windows*2, 2 do
    local curr_key = KEYS[i/2]
    local window = ARGV[i]
    redis.call('ZADD', curr_key, current_time[1], current_time[1] .. current_time[2])
    redis.call('EXPIRE', curr_key, window)
    end
    return 0

The above script has an undetermined number of arguments and an undetermined number of keys ahead of time. As such, it's essential to make sure that all the keys land on the same shard, so when we build the keys, which will be of the form path_pattern:apiKey:window_size_seconds, we will surround the common part of the key, the apiKey, with braces: {apiKey}.
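For illustration, a minimal sketch of what one of these keys looks like when built in C#; the API key foobar and the one-hour ^/api/* rule are hypothetical values:

var apiKey = "foobar"; // hypothetical API key
var key = $"^/api/*:{{{apiKey}}}:3600"; // escaped braces yield "^/api/*:{foobar}:3600"

Because every rule's key for a given API key contains the same {foobar} hash tag, all of that key's rules hash to the same slot.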

    Build The Middleware

Now it's time to actually build the middleware. Add a new file SlidingWindowRateLimiter.cs. Inside that file, add two classes: SlidingWindowRateLimiter and SlidingWindowRateLimiterExtensions.

In the SlidingWindowRateLimiterExtensions class, add one method to add the SlidingWindowRateLimiter to the middleware pipeline; that class will look like this when completed:

    public static class SlidingWindowRateLimiterExtensions
    {
    public static void UseSlidingWindowRateLimiter(this IApplicationBuilder builder)
    {
    builder.UseMiddleware<SlidingWindowRateLimiter>();
    }
    }

    In the SlidingWindowRateLimiter class, start by adding the script mentioned above as a const string for the class:

    private const string SlidingRateLimiter = @"
    local current_time = redis.call('TIME')
    local num_windows = ARGV[1]
    for i=2, num_windows*2, 2 do
    local window = ARGV[i]
    local max_requests = ARGV[i+1]
    local curr_key = KEYS[i/2]
    local trim_time = tonumber(current_time[1]) - window
    redis.call('ZREMRANGEBYSCORE', curr_key, 0, trim_time)
    local request_count = redis.call('ZCARD',curr_key)
    if request_count >= tonumber(max_requests) then
    return 1
    end
    end
    for i=2, num_windows*2, 2 do
    local curr_key = KEYS[i/2]
    local window = ARGV[i]
    redis.call('ZADD', curr_key, current_time[1], current_time[1] .. current_time[2])
    redis.call('EXPIRE', curr_key, window)
    end
    return 0
    ";

    Constructor

We need to seed this class with an IDatabase to access Redis, an IConfiguration to access the configuration, and, of course, the next link in the pipeline so processing can continue. Consequently, we'll dependency-inject all of this into our middleware:

    private readonly IDatabase _db;
    private readonly IConfiguration _config;
    private readonly RequestDelegate _next;

    public SlidingWindowRateLimiter(RequestDelegate next, IConnectionMultiplexer muxer, IConfiguration config)
    {
    _db = muxer.GetDatabase();
    _config = config;
    _next = next;
    }

    Extract Api Key

In this case, we will use basic auth, using the username from the basic auth structure as our apiKey. Consequently, we'll need a method to extract it:

    private static string GetApiKey(HttpContext context)
    {
    var encoded = string.Empty;
    var auth = context.Request.Headers["Authorization"];
    if (!string.IsNullOrEmpty(auth)) encoded = AuthenticationHeaderValue.Parse(auth).Parameter;
    if (string.IsNullOrEmpty(encoded)) return encoded;
    return Encoding.UTF8.GetString(Convert.FromBase64String(encoded)).Split(':')[0];
    }

    Extract Applicable Rules

From the configuration structure we generated before, we will pull out the RedisRateLimits section and bind it to an array of RateLimitRule objects. We then need to select the rules that apply to the current path and group them by the number of seconds in their windows and by the path key component relevant to them. If we have identical path keys, e.g. two instances of ^/api/*, we'll take the more restrictive one (fewest allowable requests). We can pull these out with a LINQ query:

    public IEnumerable<RateLimitRule> GetApplicableRules(HttpContext context)
    {
    var limits = _config.GetSection("RedisRateLimits").Get<RateLimitRule[]>();
    var applicableRules = limits
    .Where(x => x.MatchPath(context.Request.Path))
    .OrderBy(x => x.MaxRequests)
    .GroupBy(x => new{x.PathKey, x.WindowSeconds})
    .Select(x=>x.First());
    return applicableRules;
    }

    Check Limitation

Our next step is to check whether the key is currently under a limitation. Our script expects an array of Redis keys matching the pattern mentioned above, path_pattern:{apiKey}:window_size_seconds, then the number of rules to be enforced, and finally the rules themselves, appended in window_size num_requests order. With the arguments generated for the script, all we need to do is evaluate it and check whether it returns 1:

    private async Task<bool> IsLimited( IEnumerable<RateLimitRule> rules, string apiKey)
    {
    var keys = rules.Select(x => new RedisKey($"{x.PathKey}:{{{apiKey}}}:{x.WindowSeconds}")).ToArray();
    var args = new List<RedisValue>{rules.Count()};
    foreach (var rule in rules)
    {
    args.Add(rule.WindowSeconds);
    args.Add(rule.MaxRequests);
    }
    return (int) await _db.ScriptEvaluateAsync(SlidingRateLimiter, keys,args.ToArray()) == 1;
    }

    Block or Allow

    Finally, in the InvokeAsync method for our middleware, we will glue all this together. First, we'll parse out the apiKey. If the apiKey isn't present, we'll return a 401. Otherwise, we will perform the rate-limiting checks and either throttle or proceed as appropriate.

    public async Task InvokeAsync(HttpContext httpContext)
    {
    var apiKey = GetApiKey(httpContext);
    if (string.IsNullOrEmpty(apiKey))
    {
    httpContext.Response.StatusCode = 401;
    return;
    }
    var applicableRules = GetApplicableRules(httpContext);
    var limited = await IsLimited(applicableRules, apiKey);
    if (limited)
    {
    httpContext.Response.StatusCode = 429;
    return;
    }
    await _next(httpContext);
    }

    Build Controller

    Under the Controllers Folder, add a class named RateLimitedController. Then, in this controller, declare a new ApiController.

    [ApiController]
    [Route("api/[controller]")]
    public class RateLimitedController : ControllerBase
    {
    }

In this class, add two new routes, limited and indirectly-limited:

    [HttpGet]
    [HttpPost]
    [Route("limited")]
    public async Task<IActionResult> Limited()
    {
    return new JsonResult(new {Limited = false});
    }

    [HttpGet]
    [HttpPost]
    [Route("indirectly-limited")]
    public async Task<IActionResult> IndirectlyLimited()
    {
    return new JsonResult(new {NeverLimited = true});
    }

    Add Middleware to App

Open up Startup.cs.

    In the ConfigureServices method, add the following line:

    services.AddSingleton<IConnectionMultiplexer>(ConnectionMultiplexer.Connect("localhost"));

In the Configure method, add the following line:

    app.UseSlidingWindowRateLimiter();

    Configure the App

    In appsettings.json, or appsettings.Development.json, add a configuration item for the rate limits:

    "RedisRateLimits":[
    {
    "Path":"/api/RateLimited/limited",
    "Window":"30s",
    "MaxRequests": 5
    },
    {
    "PathRegex":"^/api/*",
    "Window":"1h",
    "MaxRequests": 50
    }
    ]

    Test it Out

All that's left is to test it out. If you go to your terminal and run dotnet run, you can try out the two endpoints. They are available at:

    http://localhost:5000/api/ratelimited/limited and http://localhost:5000/api/ratelimited/indirectly-limited

    You can hit these endpoints repeatedly using:

    for n in {1..7}; do echo $(curl -s -w " HTTP %{http_code}, %{time_total} s" -X POST -H "Content-Length: 0" --user "foobar:password" http://localhost:5000/api/ratelimited/limited); sleep 0.5; done

This will send seven requests, two of which will be rejected. After that, if you run:

    for n in {1..47}; do echo $(curl -s -w " HTTP %{http_code}, %{time_total} s" -X POST -H "Content-Length: 0" --user "foobar:password" http://localhost:5000/api/ratelimited/indirectly-limited); sleep 0.5; done

    It should reject another two as throttled.

    Resources

    • The source code for this tutorial is located in GitHub

    How to implement Sliding Window Rate Limiting app using ASP.NET Core & Redis

    In this tutorial, we'll learn how to build a sliding window rate limiter for ASP.NET Core using Redis.

What Is a Sliding Window Rate Limiter

The pattern that we are implementing here is a sliding window rate limiter. Unlike a fixed window rate limiter, which groups requests into buckets based on a fixed time boundary, a sliding window restricts requests over the discrete window of time immediately preceding the request under evaluation. For example, with a 10 req/minute limit on a fixed window, you could see 20 requests allowed inside a single minute: if the first 10 requests land at the end of one window and the next 10 land at the start of the next, each group fits inside its own bucket. Send those same 20 requests through a sliding window rate limiter within 60 seconds of each other, and only 10 will make it through. Using sorted sets and Lua scripts, implementing one of these rate limiters is a breeze.

    Prerequisites

    Startup Redis

Before we begin, start up Redis. For this example, we'll use the Redis docker image:

    docker run -p 6379:6379 redis

    Create Project

    In your terminal, navigate to where you want the app to live and run:

    dotnet new webapi -n SlidingWindowRateLimiter --no-https

Change directory into the SlidingWindowRateLimiter folder and run the command dotnet add package StackExchange.Redis.

Open SlidingWindowRateLimiter.csproj in Rider, Visual Studio, or open the folder in VS Code. In the Controllers folder, add an API controller called RateLimitedController. When all this is complete, RateLimitedController.cs should look like the following:

    namespace SlidingWindowRateLimiter.Controllers
    {
    [ApiController]
    [Route("api/[controller]")]
    public class RateLimitedController : ControllerBase
    {
    }
    }

    Initialize The Multiplexer

To use Redis, we're going to initialize an instance of the ConnectionMultiplexer from StackExchange.Redis. To do so, go to the ConfigureServices method inside of Startup.cs and add the following line:

    services.AddSingleton<IConnectionMultiplexer>(ConnectionMultiplexer.Connect("localhost"));

    Inject the ConnectionMultiplexer

    In RateLimitedController.cs inject the ConnectionMultiplexer into the controller and pull out an IDatabase object from it with the following:

    private readonly IDatabase _db;
    public RateLimitedController(IConnectionMultiplexer mux)
    {
    _db = mux.GetDatabase();
    }

    Add a Simple Route

    We will add a simple route that we will Rate Limit; it will be a POST request route on our controller. This POST request will use Basic auth - this means that each request is going to expect a header of the form Authorization: Basic <base64encoded> where the base64encoded will be a string of the form apiKey:apiSecret base64 encoded, e.g. Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==. This route will parse the key out of the header and return an OK result.

    [HttpPost]
    [HttpGet]
    [Route("sliding")]
    public async Task<IActionResult> Sliding([FromHeader]string authorization)
    {
    var encoded = string.Empty;
    if(!string.IsNullOrEmpty(authorization)) encoded = AuthenticationHeaderValue.Parse(authorization).Parameter;
    if (string.IsNullOrEmpty(encoded)) return new UnauthorizedResult();
    var apiKey = Encoding.UTF8.GetString(Convert.FromBase64String(encoded)).Split(':')[0];
    return Ok();
    }

With that in place, run the project with dotnet run; if you issue a POST request to http://localhost:5000/api/RateLimited/sliding - with apiKey foobar and password password - you will get a 200 OK response back.

    You can use this cURL request to elicit that response:

curl -X POST -H "Content-Length: 0" --user "foobar:password" http://localhost:5000/api/RateLimited/sliding

    Sliding Window Rate Limiter Lua Script

    To implement this pattern we will need to do the following:

1. The client will create a key for the server to check; this key will be of the format route:apikey
2. That key will map to a sorted set in Redis; we will check the current time and shave off any requests in the sorted set that fall outside of our window
3. We will then check the cardinality of the sorted set
4. If the cardinality is less than our limit, we will
  1. Add a new member to our sorted set with a score of the current time in seconds, and a member of the current time in microseconds
  2. Set the expiration for our sorted set to the window length
  3. Return 0
5. If the cardinality is greater than or equal to our limit, we will return 1

The trick here is that everything needs to happen atomically: we want to trim the set, check its cardinality, add an item to it, and set its expiration, all without anything changing in the interim. Fortunately, this is a perfect place to use a Lua script. Specifically, we are going to use the StackExchange script preparation engine to drive our Lua script, meaning we can use @variable_name in place of a particular position in ARGV or KEYS in the script. Our Lua script will be:

    local current_time = redis.call('TIME')
    local trim_time = tonumber(current_time[1]) - @window
    redis.call('ZREMRANGEBYSCORE', @key, 0, trim_time)
    local request_count = redis.call('ZCARD',@key)

    if request_count < tonumber(@max_requests) then
    redis.call('ZADD', @key, current_time[1], current_time[1] .. current_time[2])
    redis.call('EXPIRE', @key, @window)
    return 0
    end
    return 1

In order to use that in our app, we will create a new static class called Scripts, which will hold the text of the script and prepare it to run with StackExchange.Redis. Create a new file called Scripts.cs and add the following to it.

    using StackExchange.Redis;

    namespace SlidingWindowRateLimiter
    {
    public static class Scripts
    {
    public static LuaScript SlidingRateLimiterScript => LuaScript.Prepare(SlidingRateLimiter);
    private const string SlidingRateLimiter = @"
    local current_time = redis.call('TIME')
    local trim_time = tonumber(current_time[1]) - @window
    redis.call('ZREMRANGEBYSCORE', @key, 0, trim_time)
    local request_count = redis.call('ZCARD',@key)

    if request_count < tonumber(@max_requests) then
    redis.call('ZADD', @key, current_time[1], current_time[1] .. current_time[2])
    redis.call('EXPIRE', @key, @window)
    return 0
    end
    return 1
    ";
    }
    }

    Update the Controller for rate limiting

Back in our RateLimitedController's Sliding method, we will add a few lines of code to check whether we should throttle the API request. Replace the return statement with the following:

    var limited = ((int) await _db.ScriptEvaluateAsync(Scripts.SlidingRateLimiterScript,
    new {key = new RedisKey($"{Request.Path}:{apiKey}"), window = 30, max_requests = 10})) == 1;
    return limited ? new StatusCodeResult(429) : Ok();

    The whole method should look like this now:

    [HttpPost]
    [HttpGet]
    [Route("sliding")]
    public async Task<IActionResult> Sliding([FromHeader] string authorization)
    {
    var encoded = string.Empty;
    if(!string.IsNullOrEmpty(authorization)) encoded = AuthenticationHeaderValue.Parse(authorization).Parameter;
    if (string.IsNullOrEmpty(encoded)) return new UnauthorizedResult();
    var apiKey = Encoding.UTF8.GetString(Convert.FromBase64String(encoded)).Split(':')[0];
    var limited = ((int) await _db.ScriptEvaluateAsync(Scripts.SlidingRateLimiterScript,
    new {key = new RedisKey($"{Request.Path}:{apiKey}"), window = 30, max_requests = 10})) == 1;
    return limited ? new StatusCodeResult(429) : Ok();
    }

    Now, if we start our server back up with dotnet run and try running the following command:

    for n in {1..20}; do echo $(curl -s -w " HTTP %{http_code}, %{time_total} s" -X POST -H "Content-Length: 0" --user "foobar:password" http://localhost:5000/api/ratelimited/sliding); sleep 0.5; done

You will see some of your requests return a 200, and 10 of them return a 429. If you wait a bit and run the command again, you may see every other request go through: the window slides each second, and only the requests from the previous 30 seconds count when determining whether to throttle. The first run of the above command will produce output something like this:

    HTTP 200, 0.081806 s
    HTTP 200, 0.003170 s
    HTTP 200, 0.002217 s
    HTTP 200, 0.001632 s
    HTTP 200, 0.001508 s
    HTTP 200, 0.001928 s
    HTTP 200, 0.001647 s
    HTTP 200, 0.001656 s
    HTTP 200, 0.001699 s
    HTTP 200, 0.001667 s
    {"status":429,"traceId":"00-4af32d651483394292e35258d94ec4be-6c174cc42ca1164c-00"} HTTP 429, 0.012612 s
    {"status":429,"traceId":"00-7b24da2422f5b144a1345769e210b78a-75cc1deb1f260f46-00"} HTTP 429, 0.001688 s
    {"status":429,"traceId":"00-0462c9d489ce4740860ae4798e6c4869-2382f37f7e112741-00"} HTTP 429, 0.001578 s
    {"status":429,"traceId":"00-127f5493caf8e044a9f29757fbf91f0a-62187f6cf2833640-00"} HTTP 429, 0.001722 s
    {"status":429,"traceId":"00-89a4c2f7e2021a4d90264f9d040d250c-34443a5fdb2cff4f-00"} HTTP 429, 0.001718 s
    {"status":429,"traceId":"00-f1505b800f30da4b993bebb89f902401-dfbadcb1bc3b8e45-00"} HTTP 429, 0.001663 s
    {"status":429,"traceId":"00-621cf2b2f32c184fb08d0d483788897d-1c01af67cf88d440-00"} HTTP 429, 0.001601 s
    {"status":429,"traceId":"00-e310ba5214d7874dbd653a8565f38df4-216f1a4b8c4b574a-00"} HTTP 429, 0.001456 s
    {"status":429,"traceId":"00-52a7074239a5e84c9ded96166c0ef042-4dfedf1d60e3fd46-00"} HTTP 429, 0.001550 s
    {"status":429,"traceId":"00-5e03e785895f2f459c85ade852664703-c9ad961397284643-00"} HTTP 429, 0.001535 s
    {"status":429,"traceId":"00-ba2ac0f8fd902947a4789786b0f683a8-be89b14fa88d954c-00"} HTTP 429, 0.001451 s

    Resources

    • You can find the code used for this tutorial in GitHub

    .NET and Redis


    Profile picture for Steve Lorello
    Author:
    Steve Lorello, Senior Field Engineer at Redis

    Getting Started

The .NET community has built many client libraries to help handle requests to Redis. In this guide, we'll mostly be concerned with the StackExchange.Redis client library. As the name implies, the StackExchange client is developed by StackExchange for use on popular websites like StackOverflow.

    Step 1. Install the Package

    There are a few ways to Install the Package:

Run the following in the directory of the csproj file you want to add the package to.

      dotnet add package StackExchange.Redis

    Step 2. Import the Required Namespace

    using StackExchange.Redis;

    Step 3. Initialize the ConnectionMultiplexer

The ConnectionMultiplexer is the main arbiter of the connection to Redis inside the CLR; your application should maintain a single instance of it throughout its runtime. You can initialize the multiplexer with either a connection string or a ConfigurationOptions object. A typical connection string is of the form HOST_NAME:PORT_NUMBER,password=PASSWORD, where HOST_NAME is the host name of your server (e.g. localhost), PORT_NUMBER is the port Redis is listening on (e.g. 6379), and PASSWORD is your Redis server's password (e.g. secret_password).

    static readonly ConnectionMultiplexer _redis = ConnectionMultiplexer.Connect($"{HOST_NAME}:{PORT_NUMBER},password={PASSWORD}");
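Alternatively, a minimal sketch using a ConfigurationOptions object; the endpoint and password values here are placeholders:

var options = new ConfigurationOptions
{
    EndPoints = { "localhost:6379" }, // placeholder HOST_NAME:PORT_NUMBER
    Password = "secret_password" // placeholder password
};
var muxer = ConnectionMultiplexer.Connect(options);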

    Step 4. Grab Database Connection

Once we have a handle for the multiplexer, we need to get a connection to the database.

    var db = _redis.GetDatabase();

    Step 5. Use the connection

Now that you've retrieved the connection to the database, all that's left is to use it. Here are some simple operations:

    db.Ping();
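Beyond Ping, a couple of basic string operations with the same handle, as a minimal sketch:

db.StringSet("foo", "bar"); // SET foo bar
var bar = db.StringGet("foo"); // GET foo, returns "bar"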

    Redis Launchpad

    Redis Launchpad is like an “App Store” for Redis sample apps. You can easily find apps for your preferred frameworks and languages. Check out a few of these apps below, or click here to access the complete list.

    Rate Limiting App in .NET

    Launchpad

    Leaderboard App in .NET

    Launchpad

    API Caching .NET

    Launchpad

    Basic Chat App .NET

    Launchpad

    Additional Resources


    Add and Retrieve Objects

The Redis OM library supports declarative storage and retrieval of objects from Redis. Without Redis Stack, this is limited to using hashes and id lookups of objects in Redis. You will still use the Document attribute to decorate a class you'd like to store in Redis. From there, all you need to do is call Insert or InsertAsync on the RedisCollection, or Set or SetAsync on the RedisConnection, passing in the object you want to set in Redis. You can then retrieve those objects with Get<T> or GetAsync<T> on the RedisConnection, or with FindById or FindByIdAsync on the RedisCollection.

    public class Program
    {
    [Document(Prefixes = new []{"Employee"})]
    public class Employee
    {
    [RedisIdField]
    public string Id{ get; set; }

    public string Name { get; set; }

    public int Age { get; set; }

    public double Sales { get; set; }

    public string Department { get; set; }
    }

    static async Task Main(string[] args)
    {
    var provider = new RedisConnectionProvider("redis://localhost:6379");
    var connection = provider.Connection;
    var employees = provider.RedisCollection<Employee>();
    var employee1 = new Employee{Name="Bob", Age=32, Sales = 100000, Department="Partner Sales"};
    var employee2 = new Employee{Name="Alice", Age=45, Sales = 200000, Department="EMEA Sales"};
    var idp1 = await connection.SetAsync(employee1);
    var idp2 = await employees.InsertAsync(employee2);

    var reconstitutedE1 = await connection.GetAsync<Employee>(idp1);
    var reconstitutedE2 = await employees.FindByIdAsync(idp2);
    Console.WriteLine($"First Employee's name is {reconstitutedE1.Name}, they are {reconstitutedE1.Age} years old, " +
    $"they work in the {reconstitutedE1.Department} department and have sold {reconstitutedE1.Sales}, " +
    $"their ID is: {reconstitutedE1.Id}");
    Console.WriteLine($"Second Employee's name is {reconstitutedE2.Name}, they are {reconstitutedE2.Age} years old, " +
    $"they work in the {reconstitutedE2.Department} department and have sold {reconstitutedE2.Sales}, " +
    $"their ID is: {reconstitutedE2.Id}");
    }
    }

The code above declares an Employee class, lets you add employees to Redis, and then retrieves them from Redis. The output from this method will look like this:

    First Employee's name is Bob, they are 32 years old, they work in the Partner Sales department and have sold 100000, their ID is: 01FHDFE115DKRWZW0XNF17V2RK
    Second Employee's name is Alice, they are 45 years old, they work in the EMEA Sales department and have sold 200000, their ID is: 01FHDFE11T23K6FCJQNHVEF92F

If you wanted to find them in Redis directly, you could run HGETALL Employee:01FHDFE115DKRWZW0XNF17V2RK, which retrieves the Employee object as a hash from Redis. If you do not specify a prefix, the prefix will be the fully qualified class name.


    Apply Functions

Apply functions are functions that you can define as expressions to apply to your data in Redis. In essence, they allow you to combine your data and extract the information you want.

    Data Model

    For the remainder of this article we will be using this data model:

    [Document]
    public class Employee
    {
    [Indexed(Aggregatable = true)]
    public string Name { get; set; }

    [Indexed]
    public GeoLoc? HomeLoc { get; set; }

    [Indexed(Aggregatable = true)]
    public int Age { get; set; }

    [Indexed(Aggregatable = true)]
    public double Sales { get; set; }

    [Indexed(Aggregatable = true)]
    public double SalesAdjustment { get; set; }

    [Searchable(Aggregatable = true)]
    public string Department { get; set; }

    [Indexed(Aggregatable = true)]
    public long LastOnline { get; set; } = DateTimeOffset.UtcNow.ToUnixTimeSeconds();
    }

    Anatomy of an Apply Function

    Apply is a method on the RedisAggregationSet<T> class which takes two arguments, each of which is a component of the apply function.

First, it takes the expression that you want Redis to execute on every record in the pipeline. This expression takes a single parameter, an AggregationResult<T>, where T is the generic type of your RedisAggregationSet. The AggregationResult has two things to think about: first, it contains a RecordShell, which is a placeholder for the generic type; second, it has an Aggregations property, a dictionary containing the results from your pipeline. Both of these can be used in apply functions.

The second component is the alias: the name under which the result of the function is stored when the pipeline executes.

    Adjusted Sales

Our data model has two properties related to sales: Sales, how much the employee has sold, and SalesAdjustment, a figure used to adjust sales based on various factors, perhaps territory covered, experience, etc. The idea is that a fair way to analyze an employee's performance might be a combination of these two fields rather than either individually. So, let's say we wanted to find everyone's adjusted sales; we could do that by creating an apply function to calculate it:

    var adjustedSales = employeeAggregations.Apply(x => x.RecordShell.SalesAdjustment * x.RecordShell.Sales,
    "ADJUSTED_SALES");
    foreach (var result in adjustedSales)
    {
    Console.WriteLine($"Adjusted Sales were: {result["ADJUSTED_SALES"]}");
    }

    Arithmetic Apply Functions

Functions that use arithmetic and math can use the mathematical operators + for addition, - for subtraction, * for multiplication, / for division, and % for modular division; the ^ operator, which is typically used for bitwise exclusive-or, has been reserved for power functions. Additionally, you can use many System.Math library operations within apply functions, and those will be translated to the appropriate methods for use by Redis; see the sketch after the table below.

    Available Math Functions

Function | Type | Description | Example
Log10 | Math | yields the base-10 log of the provided number | Math.Log10(x["AdjustedSales"])
Abs | Math | yields the absolute value of the provided number | Math.Abs(x["AdjustedSales"])
Ceil | Math | yields the smallest integer not less than the provided number | Math.Ceil(x["AdjustedSales"])
Floor | Math | yields the largest integer not greater than the provided number | Math.Floor(x["AdjustedSales"])
Log | Math | yields the base-2 log of the provided number | Math.Log(x["AdjustedSales"])
Exp | Math | yields the natural exponent of the provided number (e^y) | Math.Exp(x["AdjustedSales"])
Sqrt | Math | yields the square root of the provided number | Math.Sqrt(x["AdjustedSales"])
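For instance, a minimal sketch that chains a second apply function off the ADJUSTED_SALES alias computed earlier; the chained form and the ADJUSTED_SALES_FLOOR alias are illustrative assumptions, not part of the original example:

var flooredAdjusted = employeeAggregations
    .Apply(x => x.RecordShell.SalesAdjustment * x.RecordShell.Sales, "ADJUSTED_SALES")
    .Apply(x => Math.Floor(x["ADJUSTED_SALES"]), "ADJUSTED_SALES_FLOOR"); // reads the pipeline's Aggregations dictionary

foreach (var result in flooredAdjusted)
{
    Console.WriteLine($"Adjusted sales, rounded down: {result["ADJUSTED_SALES_FLOOR"]}");
}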

    String Functions

You can also apply multiple string functions to your data. If, for example, you wanted to create a birthday message for each employee, you could do so by calling String.Format on your records:

    var birthdayMessages = employeeAggregations.Apply(x =>
    string.Format("Congratulations {0} you are {1} years old!", x.RecordShell.Name, x.RecordShell.Age), "message");
    await foreach (var message in birthdayMessages)
    {
    Console.WriteLine(message["message"].ToString());
    }

    List of String Functions:

Function | Type | Description | Example
ToUpper | String | yields the provided string in upper case | x.RecordShell.Name.ToUpper()
ToLower | String | yields the provided string in lower case | x.RecordShell.Name.ToLower()
StartsWith | String | boolean expression - yields 1 if the string starts with the argument | x.RecordShell.Name.StartsWith("bob")
Contains | String | boolean expression - yields 1 if the string contains the argument | x.RecordShell.Name.Contains("bob")
Substring | String | yields the substring starting at the given 0-based index with the length of the second argument; if the second argument is not provided, it returns the balance of the string | x.RecordShell.Name.Substring(4, 10)
Format | String | formats the string based off the provided pattern | string.Format("Hello {0} You are {1} years old", x.RecordShell.Name, x.RecordShell.Age)
Split | String | splits the string with the provided string - unfortunately, if you are only passing in a single splitter, because of how expressions work, you'll need to provide StringSplitOptions so that no optional parameters exist when building the expression; just pass StringSplitOptions.None | x.RecordShell.Name.Split(",", StringSplitOptions.None)

    Time Functions

You can also perform functions on time data in Redis. If you have a timestamp stored in a usable format (a unix timestamp or a timestamp string that can be parsed with strftime conventions), you can operate on it. For example, to translate a unix timestamp to YYYY-MM-DDTHH:MM:SSZ, just call ApplyFunctions.FormatTimestamp on the record inside of your apply function. E.g.

    var lastOnline = employeeAggregations.Apply(x => ApplyFunctions.FormatTimestamp(x.RecordShell.LastOnline),
    "LAST_ONLINE_STRING");

    foreach (var employee in lastOnline)
    {
    Console.WriteLine(employee["LAST_ONLINE_STRING"].ToString());
    }

    Time Functions Available

Function | Type | Description | Example
ApplyFunctions.FormatTimestamp | time | transforms a unix timestamp to a formatted time string based off strftime conventions | ApplyFunctions.FormatTimestamp(x.RecordShell.LastTimeOnline)
ApplyFunctions.ParseTime | time | parses the provided formatted timestamp to a unix timestamp | ApplyFunctions.ParseTime(x.RecordShell.TimeString, "%FT%ZT")
ApplyFunctions.Day | time | rounds a unix timestamp to the beginning of the day | ApplyFunctions.Day(x.RecordShell.LastTimeOnline)
ApplyFunctions.Hour | time | rounds a unix timestamp to the beginning of the current hour | ApplyFunctions.Hour(x.RecordShell.LastTimeOnline)
ApplyFunctions.Minute | time | rounds a unix timestamp to the beginning of the current minute | ApplyFunctions.Minute(x.RecordShell.LastTimeOnline)
ApplyFunctions.Month | time | rounds a unix timestamp to the beginning of the current month | ApplyFunctions.Month(x.RecordShell.LastTimeOnline)
ApplyFunctions.DayOfWeek | time | converts the unix timestamp to the day number, with Sunday being 0 | ApplyFunctions.DayOfWeek(x.RecordShell.LastTimeOnline)
ApplyFunctions.DayOfMonth | time | converts the unix timestamp to the current day of the month (1..31) | ApplyFunctions.DayOfMonth(x.RecordShell.LastTimeOnline)
ApplyFunctions.DayOfYear | time | converts the unix timestamp to the current day of the year (0..365) | ApplyFunctions.DayOfYear(x.RecordShell.LastTimeOnline)
ApplyFunctions.Year | time | converts the unix timestamp to the current year | ApplyFunctions.Year(x.RecordShell.LastTimeOnline)
ApplyFunctions.MonthOfYear | time | converts the unix timestamp to the current month (0..11) | ApplyFunctions.MonthOfYear(x.RecordShell.LastTimeOnline)
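As a sketch of how these compose with the earlier example (assuming the same LastOnline unix-timestamp field), you could round each employee's last-online time down to the start of its hour:

var lastOnlineHours = employeeAggregations.Apply(x => ApplyFunctions.Hour(x.RecordShell.LastOnline),
"LAST_ONLINE_HOUR");
await foreach (var employee in lastOnlineHours)
{
// yields the unix timestamp rounded down to the start of the hour
Console.WriteLine(employee["LAST_ONLINE_HOUR"].ToString());
}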

    Geo Distance

Another useful function is the GeoDistance function, which allows you to compute the distance between two points. For example, if you wanted to see how far away from the office each employee was, you could use the ApplyFunctions.GeoDistance function inside your pipeline:

    var officeLoc = new GeoLoc(-122.064181, 37.377207);
    var distanceFromWork =
    employeeAggregations.Apply(x => ApplyFunctions.GeoDistance(x.RecordShell.HomeLoc, officeLoc), "DistanceToWork");
await foreach (var element in distanceFromWork)
    {
    Console.WriteLine(element["DistanceToWork"].ToString());
    }
    - + \ No newline at end of file diff --git a/develop/dotnet/redis-om-dotnet/aggregations/groups/groups/index.html b/develop/dotnet/redis-om-dotnet/aggregations/groups/groups/index.html index f21157d32b..2b209c4dbf 100644 --- a/develop/dotnet/redis-om-dotnet/aggregations/groups/groups/index.html +++ b/develop/dotnet/redis-om-dotnet/aggregations/groups/groups/index.html @@ -4,7 +4,7 @@ Grouping and Reductions | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Grouping and Reductions

    Grouping and reducing operations using aggregations can be extremely powerful.

    What Is a Group

A group is simply a set of like records in Redis, i.e. records that share the same value for a given field or fields.

    e.g.

    {
    "Name":"Susan",
    "Department":"Sales",
    "Sales":600000
    }

    {
    "Name":"Tom",
    "Department":"Sales",
    "Sales":500000
    }

If grouped together by Department, these records would form one group. When grouped by Name, they would form two groups.

    Reductions

What makes groups so useful in Redis aggregations is that you can run reductions on them to aggregate items within the group. For example, you can calculate summary statistics on numeric fields, retrieve random samples, and compute distinct counts or approximate distinct counts for any aggregatable field in the set.

    Using Groups and Reductions with Redis OM .NET

You can run reductions against a RedisAggregationSet either with or without a group. If you run a reduction without a group, the result of the reduction materializes immediately as the desired type. If you run a reduction against a group, the results materialize when they are enumerated.

    Reductions without a Group

If you wanted to calculate a reduction on all the records indexed by Redis in the collection, you would simply call the reduction on the RedisAggregationSet:

    var sumSales = employeeAggregations.Sum(x=>x.RecordShell.Sales);
    Console.WriteLine($"The sum of sales for all employees was {sumSales}");

    Reductions with a Group

If you want to build a group to run reductions on (e.g. to calculate the average sales in a department), you would use a GroupBy predicate to specify which field or fields to group by. If you want to group by one field, your lambda function for the GroupBy will yield just the field you want to group by. If you want to group by multiple fields, new up an anonymous type inline:

    var oneFieldGroup = employeeAggregations.GroupBy(x=>x.RecordShell.Department);

    var multiFieldGroup = employeeAggregations.GroupBy(x=>new {x.RecordShell.Department, x.RecordShell.WorkLoc});

From here, you can run reductions on your groups by executing a reduction function. When the collection materializes, the AggregationResult<T> will store the reduction under a formatted key of the form PropertyName_COMMAND_POSTFIX (see the supported operations table below for postfixes). If you wanted to calculate the sum of the sales of all the departments you could:

    var departments = employeeAggregations.GroupBy(x=>x.RecordShell.Department).Sum(x=>x.RecordShell.Sales);
    foreach(var department in departments)
    {
    Console.WriteLine($"The {department[nameof(Employee.Department)]} department sold {department["Sales_SUM"]}");
    }
Command Name | Command Postfix | Description
Count | COUNT | the number of records meeting the query, or in the group
CountDistinct | COUNT_DISTINCT | counts the distinct occurrences of a given property in a group
CountDistinctish | COUNT_DISTINCTISH | provides an approximate count of distinct occurrences of a given property in each group - less expensive computationally, but with a small (about 3%) error rate
Sum | SUM | the sum of all occurrences of the provided field in each group
Min | MIN | the minimum occurrence of the provided field in each group
Max | MAX | the maximum occurrence of the provided field in each group
Average | Avg | the arithmetic mean of all the occurrences of the provided field in a group
StandardDeviation | STDDEV | the standard deviation from the arithmetic mean of all the occurrences of the provided field in each group
Quantile | QUANTILE | the value of a record at the provided quantile for a field in each group, e.g., the median of the field would sit at quantile .5
Distinct | TOLIST | enumerates all the distinct values of a given field in each group
FirstValue | FIRST_VALUE | retrieves the first occurrence of a given field in each group
RandomSample | RANDOMSAMPLE{NumRecords} | a random sample of the given field in each group
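Following the same postfix convention, here's a sketch (assuming Max is invoked like Sum in the example above) that finds each department's largest single sales figure:

var topSales = employeeAggregations.GroupBy(x => x.RecordShell.Department).Max(x => x.RecordShell.Sales);
foreach (var department in topSales)
{
// the reduction lands under PropertyName_COMMAND_POSTFIX, here Sales_MAX
Console.WriteLine($"{department[nameof(Employee.Department)]} top sale: {department["Sales_MAX"]}");
}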

    Closing Groups

When you invoke a GroupBy, the return type changes from RedisAggregationSet to GroupedAggregationSet. In some instances you may need to close a group out and use its results further down the pipeline. To do this, all you need to do is call CloseGroup on the GroupedAggregationSet; that ends the group predicates and allows you to use the results further down the pipeline.
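A minimal sketch of that, assuming you simply want to enumerate the reduced results after closing the group:

var departmentSales = employeeAggregations
.GroupBy(x => x.RecordShell.Department)
.Sum(x => x.RecordShell.Sales)
.CloseGroup(); // ends the group predicates; results can flow further down the pipeline
foreach (var department in departmentSales)
{
Console.WriteLine($"{department[nameof(Employee.Department)]} sold {department["Sales_SUM"]}");
}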

    - + \ No newline at end of file diff --git a/develop/dotnet/redis-om-dotnet/aggregations/index.html b/develop/dotnet/redis-om-dotnet/aggregations/index.html index 101310a2b8..7ea1dfb0f9 100644 --- a/develop/dotnet/redis-om-dotnet/aggregations/index.html +++ b/develop/dotnet/redis-om-dotnet/aggregations/index.html @@ -4,7 +4,7 @@ Aggregations Intro | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Aggregations Intro

Aggregations are a method of grouping documents together and processing them on the server, transforming them into the data that you need in your application without having to perform the computation client-side.

    Anatomy of a Pipeline

Aggregations in Redis are built around an aggregation pipeline. You will start off with a RedisAggregationSet<T> of objects that you have indexed in Redis. From there you can:

    • Query to filter down the results you want
• Apply functions to them to transform or combine their fields
    • Group like features together
    • Run reductions on groups
    • Sort records
    • Further filter down records

    Setting up for Aggregations

Redis OM .NET provides a RedisAggregationSet<T> class that will let you perform aggregations on your indexed objects. Let's start off with a trivial aggregation by defining a model:

    [Document]
    public class Employee
    {
    [Indexed]
    public string Name { get; set; }

    [Indexed]
    public GeoLoc? HomeLoc { get; set; }

    [Indexed(Aggregatable = true)]
    public int Age { get; set; }

    [Indexed(Aggregatable = true)]
    public double Sales { get; set; }

    [Indexed(Aggregatable = true)]
    public double SalesAdjustment { get; set; }

    [Searchable(Aggregatable = true)]
    public string Department { get; set; }
    }

We'll then create the index for that model, pull out a RedisAggregationSet<T> from our provider, and seed some data into our database:

    var provider = new RedisConnectionProvider("redis://localhost:6379");
await provider.Connection.CreateIndexAsync(typeof(Employee));
    var employees = provider.RedisCollection<Employee>();
    var employeeAggregations = provider.AggregationSet<Employee>();
    var e1 = new Employee {Name = "Bob", Age = 35, Sales = 100000, SalesAdjustment = 1.5, Department = "EMEA Sales"};
    var e2 = new Employee {Name = "Alice", Age = 52, Sales = 300000, SalesAdjustment = 1.02, Department = "Partner Sales"};
    var e3 = new Employee {Name = "Marcus", Age = 42, Sales = 250000, SalesAdjustment = 1.1, Department = "NA Sales"};
    var e4 = new Employee {Name = "Susan", Age = 27, Sales = 200000, SalesAdjustment = .95, Department = "EMEA Sales"};
    var e5 = new Employee {Name = "John", Age = 38, Sales = 275000, SalesAdjustment = .9, Department = "APAC Sales"};
    var e6 = new Employee {Name = "Theresa", Age = 30, Department = "EMEA Ops"};
    employees.Insert(e1);
    employees.Insert(e2);
    employees.Insert(e3);
    employees.Insert(e4);
    employees.Insert(e5);
    employees.Insert(e6);

    The AggregationResult

The aggregations pipeline is all built around the RedisAggregationSet<T>. This set is generic, so you can provide the model that you want to build your aggregations around (an indexed type), but you will notice that the return type from queries to the RedisAggregationSet is not the generic type you passed into it. Rather, it is an AggregationResult<T>, where T is the generic type you passed in. This is a really important concept: when results are returned from aggregations, they are not hydrated into an object like they are with queries. That's because aggregations aren't meant to pull your model data out of the database; rather, they are meant to pull out aggregated results. The AggregationResult has a RecordShell field, which is ALWAYS null outside of the pipeline. It can be used to build expressions for querying objects in Redis, but when the AggregationResult lands, it will not contain a hydrated record; instead, it will contain a dictionary of aggregations built by the aggregation pipeline. This means that you access the results of your aggregations by indexing into the AggregationResult.
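As a quick hypothetical sketch of that distinction (the "doubleAge" alias is made up for illustration):

var doubledAges = employeeAggregations.Apply(x => x.RecordShell.Age * 2, "doubleAge");
await foreach (var result in doubledAges)
{
// result.RecordShell is null here; the computed value is read by its alias instead
Console.WriteLine(result["doubleAge"].ToString());
}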

    Simple Aggregations

Let's try running an aggregation where we find the sum of the sales for all our employees in EMEA. The aggregations pipeline will use the RecordShell object, which is a reference to the generic type of the aggregation set, and for something as simple as a group-less SUM you will simply get back a numeric type from the aggregation.

    var sumOfSalesEmea = employeeAggregations.Where(x => x.RecordShell.Department == "EMEA")
    .Sum(x => x.RecordShell.Sales);
    Console.WriteLine($"EMEA sold:{sumOfSalesEmea}");

The Where expression tells the aggregation pipeline which records to consider, and the subsequent Sum expression indicates which field to sum. Aggregations are a rich feature, and this only scratches the surface; these pipelines are remarkably flexible and provide you the ability to do all sorts of neat operations on your data in Redis.

    - + \ No newline at end of file diff --git a/develop/dotnet/redis-om-dotnet/getting-started/index.html b/develop/dotnet/redis-om-dotnet/getting-started/index.html index 777296bb92..93b55160fa 100644 --- a/develop/dotnet/redis-om-dotnet/getting-started/index.html +++ b/develop/dotnet/redis-om-dotnet/getting-started/index.html @@ -4,7 +4,7 @@ Getting Started with Redis OM .NET | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Getting Started with Redis OM .NET

    Redis OM is designed to make using Redis easier for .NET developers, so naturally the first question one might ask is where to start?

    Prerequisites

    • A .NET Standard 2.0 compatible version of .NET. This means that all .NET Framework versions 4.6.1+, .NET Core 2.0+ and .NET 5+ will work with Redis OM .NET.
• An IDE for writing .NET; Visual Studio, Rider, or VS Code will all work.

    Installation

To install Redis OM .NET, all you need to do is add the Redis.OM NuGet package to your project. This can be done by running dotnet add package Redis.OM

Connecting to Redis

    The next major step for getting started with Redis OM .NET is to connect to Redis.

The Redis OM library is an abstraction above a lower-level (closer to Redis) library, StackExchange.Redis, which it uses to manage connections to Redis. That is, however, an implementation detail which should not be a concern to the user. The RedisConnectionProvider class contains the connection logic and provides connections to Redis. The RedisConnectionProvider should only be initialized once in your app's lifetime.

    Initializing RedisConnectionProvider

    RedisConnectionProvider takes a Redis URI and uses that to initialize a connection to Redis.

Consequently, all that needs to be done to initialize the client is to call the constructor of RedisConnectionProvider with a Redis URI. Alternatively, you can connect with a ConnectionConfiguration object, or, if you already have a ConnectionMultiplexer in your DI container, you can construct it with your ConnectionMultiplexer.

Connecting to a Standalone Instance of Redis with No Auth

    var provider = new RedisConnectionProvider("redis://hostname:port");

Connecting to a Standalone Instance of Redis with Just a Password

    var provider = new RedisConnectionProvider("redis://:password@hostname:port");

Connecting to a Standalone Instance of Redis or Redis Enterprise with a Username and Password

    var provider = new RedisConnectionProvider("redis://username:password@hostname:port");

Connecting to a Standalone Instance of Redis with a Particular Database

    var provider = new RedisConnectionProvider("redis://username:password@hostname:port/4");

    Connecting to Redis Sentinel

When connecting to Redis Sentinel, you will need to provide the sentinel endpoints and the name of the primary:

    var provider = new RedisConnectionProvider("redis://username:password@sentinel-hostname:port?endpoint=another-sentinel-host:port&endpoint=yet-another-sentinel-hot:port&sentinel_primary_name=redisprimary");

    Connecting to Redis Cluster

Connecting to a Redis Cluster is similar to connecting to a standalone server; it is advisable, however, to include at least one other alternative endpoint in the URI as a query parameter in case of a failover event.

    var provider = new RedisConnectionProvider("redis://username:password@hostname:port?endpoint=another-primary-host:port");

    Getting the RedisConnection, RedisCollection, and RedisAggregationSet

There are three primary drivers of Redis in this library, all of which can be accessed from the provider object after it's been initialized.

• The RedisConnection - this provides a command-level interface to Redis. A limited set of commands are directly implemented, but any command can be executed via the Execute and ExecuteAsync methods. To get a handle to the RedisConnection, just use provider.Connection
• RedisCollection<T> - this is a generic collection used to access Redis. It provides a fluent interface for retrieving data stored in Redis. To create a RedisCollection<T>, use provider.RedisCollection<T>()
• RedisAggregationSet<T> - this is another generic collection used to aggregate data in Redis. It provides a fluent interface for performing mapping & reduction operations on Redis. To create a RedisAggregationSet<T>, use provider.AggregationSet<T>() (all three are shown together in the sketch below)
    - + \ No newline at end of file diff --git a/develop/dotnet/redis-om-dotnet/searching/creating-an-index/index.html b/develop/dotnet/redis-om-dotnet/searching/creating-an-index/index.html index c795ef0ab8..38cf5d3d7c 100644 --- a/develop/dotnet/redis-om-dotnet/searching/creating-an-index/index.html +++ b/develop/dotnet/redis-om-dotnet/searching/creating-an-index/index.html @@ -4,7 +4,7 @@ Creating an Index with Redis OM | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Creating an Index with Redis OM

To unlock some of the nicest functionality of Redis OM, e.g., running searches, matches, aggregations, reductions, and mappings, you will need to tell Redis how you want data to be stored and how you want it indexed. One of the features the Redis OM library provides is creating indices that map directly to your objects by declaring the indices as attributes on your class.

    Let's start with an example class.

    [Document]
    public partial class Person
    {
    [RedisIdField]
    public string Id { get; set; }

    [Searchable(Sortable = true)]
    public string Name { get; set; }

    [Indexed(Aggregatable = true)]
    public GeoLoc? Home { get; set; }

    [Indexed(Aggregatable = true)]
    public GeoLoc? Work { get; set; }

    [Indexed(Sortable = true)]
    public int? Age { get; set; }

    [Indexed(Sortable = true)]
    public int? DepartmentNumber { get; set; }

    [Indexed(Sortable = true)]
    public double? Sales { get; set; }

    [Indexed(Sortable = true)]
    public double? SalesAdjustment { get; set; }

    [Indexed(Sortable = true)]
    public long? LastTimeOnline { get; set; }

    [Indexed(Aggregatable = true)]
    public string Email { get; set; }
    }

    As shown above, you can declare a class as being indexed with the Document Attribute. In the Document attribute, you can set a few fields to help build the index:

Property Name | Description | Default | Optional
StorageType | defines the underlying data structure used to store the object in Redis; options are HASH and JSON (note: JSON is only usable with Redis Stack) | HASH | true
IndexName | the name of the index | $"{SimpleClassName.ToLower()}-idx" | true
Prefixes | the key prefixes for Redis to build an index off of | new string[]{$"{FullyQualifiedClassName}:"} | true
Language | the language to use for full-text search indexing | null | true
LanguageField | the name of the field in which the document stores its language | null | true
Filter | the filter to use to determine whether a particular item is indexed, e.g. @Age>=18 | null | true
IdGenerationStrategy | the strategy used to generate ids for documents; if left blank, it will use a ULID generation strategy | UlidGenerationStrategy | true
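For example, a sketch of a declaration that overrides a few of these defaults (recall from the table that JSON storage requires Redis Stack):

[Document(StorageType = StorageType.Json, IndexName = "person-idx", Prefixes = new[] {"Person"})]
public partial class Person
{
// field declarations as shown above
}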

    Field Level Declarations

    Id Fields

Every class indexed by Redis must contain an Id field marked with the RedisIdField attribute.

    Indexed Fields

In addition to declaring an Id field, you can also declare indexed fields, which will let you search for values within those fields afterward. There are two types of field-level attributes:

1. Indexed - this type of index is valid for fields of type string, a numeric type (double/int/float, etc.), or GeoLoc; the exact way that the indexed field is interpreted depends on the indexed type.
2. Searchable - this type is only valid for string fields, but it enables full-text search on the decorated fields.

    IndexedAttribute Properties

    There are properties inside the IndexedAttribute that let you further customize how things are stored & queried.

PropertyName | Type | Description | Default | Optional
PropertyName | string | the name of the property to be indexed | the name of the property being indexed | true
Sortable | bool | whether to index the item so it can be sorted on in queries; enables use of OrderBy & OrderByDescending -> collection.OrderBy(x=>x.Email) | false | true
Normalize | bool | only applicable for string type fields; determines whether the text in a field is normalized (sent to lower case) for purposes of sorting | true | true
Separator | char | only applicable for string type fields; the character to use for separating tag fields, allowing the application of multiple tags to the same item, e.g. article.Category = technology,parenting delineated by a , means that collection.Where(x=>x.Category == "technology") and collection.Where(x=>x.Category == "parenting") will both match the record | | true
CaseSensitive | bool | only applicable for string type fields; determines whether case is considered when performing matches on tags | false | true
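As a sketch of the Separator behavior described above (Article is a hypothetical class for illustration):

[Document]
public class Article
{
[RedisIdField]
public string Id { get; set; }

// "technology,parenting" is indexed as two separate tags
[Indexed(Separator = ',')]
public string Category { get; set; }
}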

    SearchableAttribute Properties

There are properties on the SearchableAttribute that let you further customize how the full-text search determines matches:

PropertyName | Type | Description | Default | Optional
PropertyName | string | the name of the property to be indexed | the name of the indexed property | true
Sortable | bool | whether to index the item so it can be sorted on in queries; enables use of OrderBy & OrderByDescending -> collection.OrderBy(x=>x.Email) | false | true
NoStem | bool | determines whether to use stemming (adding the stem of the word to the index); setting this to true will stop Redis from indexing the stems of words | false | true
PhoneticMatcher | string | the phonetic matcher to use if you'd like the index to use Phonetic Matching (https://oss.redis.com/redisearch/Phonetic_Matching/) | null | true
Weight | double | determines the importance of the field for checking result accuracy | 1.0 | true
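And a small sketch combining a couple of the Searchable options from the table (Title is a hypothetical field):

// sortable full-text field whose matches weigh 1.5x in result scoring
[Searchable(Sortable = true, Weight = 1.5)]
public string Title { get; set; }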

    Creating The Index

After declaring the index, creating it is pretty straightforward. All you have to do is call CreateIndex for the decorated type. The library will take care of serializing the provided type into a searchable index. The library does not try to be particularly clever, so if the index already exists, the creation request will be rejected and you will have to drop and re-add the index (migrations are a feature that may be added in the future).

    var connection = provider.Connection;
    connection.CreateIndex(typeof(Person));
    - + \ No newline at end of file diff --git a/develop/dotnet/redis-om-dotnet/searching/geo-filters/index.html b/develop/dotnet/redis-om-dotnet/searching/geo-filters/index.html index dba62d8069..d661e68296 100644 --- a/develop/dotnet/redis-om-dotnet/searching/geo-filters/index.html +++ b/develop/dotnet/redis-om-dotnet/searching/geo-filters/index.html @@ -4,7 +4,7 @@ Geo Filters | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Geo Filters

A really nifty bit of indexing you can do with Redis OM is geo-indexing. To geo-index, all you need to do is mark a GeoLoc field in your model as Indexed and create the index:

    [Document]
    public class Restaurant
    {
    [Indexed]
    public string Name { get; set; }

    [Indexed]
    public GeoLoc Location{get; set;}

    [Indexed(Aggregatable = true)]
    public double CostPerPerson{get;set;}
    }

    So let's create the index and seed some data.

    // connect
    var provider = new RedisConnectionProvider("redis://localhost:6379");

    // get connection
    var connection = provider.Connection;

    // get collection
    var restaurants = provider.RedisCollection<Restaurant>();

    // Create index
    await connection.CreateIndexAsync(typeof(Restaurant));

    // seed with dummy data
    var r1 = new Restaurant {Name = "Tony's Pizza & Pasta", CostPerPerson = 12.00, Location = new (-122.076751,37.369929)};
    var r2 = new Restaurant {Name = "Nizi Sushi", CostPerPerson = 16.00, Location = new (-122.057360,37.371207)};
    var r3 = new Restaurant {Name = "Thai Thai", CostPerPerson = 11.50, Location = new (-122.04382,37.38)};
    var r4 = new Restaurant {Name = "Chipotles", CostPerPerson = 8.50, Location = new (-122.0524,37.359719 )};
    restaurants.Insert(r1);
    restaurants.Insert(r2);
    restaurants.Insert(r3);
    restaurants.Insert(r4);

    Querying Based off Location

With our data seeded, we can now run geo-filters on our restaurant data. Let's say we had an office (e.g. Redis's offices in Mountain View at -122.064224,37.377266) and we wanted to find nearby restaurants. To query restaurants within a certain radius, say 1 mile, we can use a GeoFilter:

var nearbyRestaurants = restaurants.GeoFilter(x => x.Location, -122.064224, 37.377266, 1, GeoLocDistanceUnit.Miles);
    foreach (var restaurant in nearbyRestaurants)
    {
    Console.WriteLine($"{restaurant.Name} is within 1 mile of work");
    }
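Since the result of a Where is still a queryable collection, here's a sketch (assuming GeoFilter chains with other predicates the same way) that narrows by price within the same radius:

var cheapAndClose = restaurants
.Where(x => x.CostPerPerson < 10)
.GeoFilter(x => x.Location, -122.064224, 37.377266, 1, GeoLocDistanceUnit.Miles);
foreach (var restaurant in cheapAndClose)
{
Console.WriteLine($"{restaurant.Name} is nearby and costs about {restaurant.CostPerPerson} per person");
}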
    - + \ No newline at end of file diff --git a/develop/dotnet/redis-om-dotnet/searching/numeric-queries/index.html b/develop/dotnet/redis-om-dotnet/searching/numeric-queries/index.html index 5993295679..c039b5dc7a 100644 --- a/develop/dotnet/redis-om-dotnet/searching/numeric-queries/index.html +++ b/develop/dotnet/redis-om-dotnet/searching/numeric-queries/index.html @@ -4,7 +4,7 @@ Numeric Queries | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Numeric Queries

    In addition to providing capabilities for text queries, Redis OM also provides you the ability to perform numeric equality and numeric range queries. Let us assume a model of:

    [Document]
    public class Employee
    {
    [Indexed]
    public string Name { get; set; }

    [Indexed(Aggregatable = true)]
    public int Age { get; set; }

    [Indexed(Aggregatable = true)]
    public double Sales { get; set; }

    [Searchable(Aggregatable = true)]
    public string Department { get; set; }
    }

    Assume that we've connected to Redis already and retrieved a RedisCollection and seeded some data as such:

    var employees = provider.RedisCollection<Employee>();
    var e1 = new Employee {Name = "Bob", Age = 35, Sales = 100000, Department = "EMEA Sales"};
    var e2 = new Employee {Name = "Alice", Age = 52, Sales = 300000, Department = "Partner Sales"};
    var e3 = new Employee {Name = "Marcus", Age = 42, Sales = 250000, Department = "NA Sales"};
    var e4 = new Employee {Name = "Susan", Age = 27, Sales = 200000, Department = "EMEA Sales"};
    var e5 = new Employee {Name = "John", Age = 38, Sales = 275000, Department = "APAC Sales"};
    var e6 = new Employee {Name = "Theresa", Age = 30, Department = "EMEA Ops"};
    employees.Insert(e1);
    employees.Insert(e2);
    employees.Insert(e3);
    employees.Insert(e4);
    employees.Insert(e5);
    employees.Insert(e6);

We can now perform queries against the numeric values in our data, just as we would with any other collection, using LINQ expressions.

    var underThirty = employees.Where(x=>x.Age < 30);
    var middleTierSales = employees.Where(x=>x.Sales > 100000 && x.Sales < 300000);

    You can of course also pair numeric queries with Text Queries:

var emeaMidTier = employees.Where(x => x.Sales > 100000 && x.Sales < 300000 && x.Department == "EMEA");

    Sorting

    If an Indexed field is marked as Sortable, or Aggregatable, you can order by that field using OrderBy predicates.

    var employeesBySales = employees.OrderBy(x=>x.Sales);
    var employeesBySalesDescending = employees.OrderByDescending(x=>x.Sales);
    - + \ No newline at end of file diff --git a/develop/dotnet/redis-om-dotnet/simple-text-queries/index.html b/develop/dotnet/redis-om-dotnet/simple-text-queries/index.html index 04d6298e96..4fb5317fff 100644 --- a/develop/dotnet/redis-om-dotnet/simple-text-queries/index.html +++ b/develop/dotnet/redis-om-dotnet/simple-text-queries/index.html @@ -4,7 +4,7 @@ Simple Text Queries | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Simple Text Queries

The RedisCollection provides a fluent interface for querying objects stored in Redis. This means that if you store an object in Redis with the Redis OM library, and you have Redis Stack running, you can query objects stored in Redis with ease using the LINQ syntax you're used to.

    Define the Model

Let's start off by defining a model that we will be using for querying; we will use an Employee class with some basic fields we may want to query:

    [Document]
    public class Employee
    {
    [Indexed]
    public string Name { get; set; }

    [Indexed(Aggregatable = true)]
    public int Age { get; set; }

    [Indexed(Aggregatable = true)]
    public double Sales { get; set; }

    [Searchable(Aggregatable = true)]
    public string Department { get; set; }
    }

    Connect to Redis

    Now we will initialize a RedisConnectionProvider, and grab a handle to a RedisCollection for Employee

    static async Task Main(string[] args)
    {
    var provider = new RedisConnectionProvider("redis://localhost:6379");
    var connection = provider.Connection;
var employees = provider.RedisCollection<Employee>();
    await connection.CreateIndexAsync(typeof(Employee));
    }

    Create our Index

Next we'll create the index. We've already done this in our Main method above: passing our Employee type to connection.CreateIndexAsync condenses the type into an index.

    Seed some Data

Next we'll seed a few pieces of data in our database to play around with:

    var e1 = new Employee {Name = "Bob", Age = 35, Sales = 100000, Department = "EMEA Sales"};
    var e2 = new Employee {Name = "Alice", Age = 52, Sales = 300000, Department = "Partner Sales"};
    var e3 = new Employee {Name = "Marcus", Age = 42, Sales = 250000, Department = "NA Sales"};
    var e4 = new Employee {Name = "Susan", Age = 27, Sales = 200000, Department = "EMEA Sales"};
    var e5 = new Employee {Name = "John", Age = 38, Sales = 275000, Department = "APAC Sales"};
    var e6 = new Employee {Name = "Theresa", Age = 30, Department = "EMEA Ops"};
    var insertTasks = new []
    {
    employees.InsertAsync(e1),
    employees.InsertAsync(e2),
    employees.InsertAsync(e3),
    employees.InsertAsync(e4),
employees.InsertAsync(e5),
    employees.InsertAsync(e6)
    };
    await Task.WhenAll(insertTasks);

    Simple Text Query of an Indexed Field

With this data inserted into our database, we can now go ahead and begin querying. Let's start out by trying to query people by name. We can search for all employees named Susan with a simple Where predicate:

    var susans = employees.Where(x => x.Name == "Susan");
    await foreach (var susan in susans)
    {
    Console.WriteLine($"Susan is {susan.Age} years old and works in the {susan.Department} department ");
    }

Where predicates also support and/or operators; e.g., to find all employees named Alice or Bob, you can use:

    var AliceOrBobs = employees.Where(x => x.Name == "Alice" || x.Name == "Bob");
    await foreach (var employee in AliceOrBobs)
    {
    Console.WriteLine($"{employee.Name} is {employee.Age} years old and works in the {employee.Department} Department");
    }

    Limiting Result Object Fields

When you are querying larger documents in Redis, you may not want to drag the entire object back over the network; in that case, you can limit the results to only what you want using a Select predicate. E.g., if you only wanted to find out the ages of employees, all you would need to do is select the ages:

    var employeeAges = employees.Select(x => x.Age);
    await foreach (var age in employeeAges)
    {
    Console.WriteLine($"age: {age}");
    }

    Or if you want to select more than one field you can create a new anonymous object:

    var employeeAges = employees.Select(x => new {x.Name, x.Age});
    await foreach (var e in employeeAges)
    {
    Console.WriteLine($"{e.Name} is age: {e.Age} years old");
    }

    Limiting Returned Objects

You can limit the size of your result (in the number of objects returned) with Skip & Take predicates. Skip will skip over the specified number of records, and Take will take only the number of records provided (at most).

    var people = employees.Skip(1).Take(2);
    await foreach (var e in people)
    {
    Console.WriteLine($"{e.Name} is age: {e.Age} years old");
    }

There are two types of attributes that can decorate strings: Indexed, which we've gone over, and Searchable, which we've yet to discuss. The Searchable attribute considers equality slightly differently than Indexed; it operates off a full-text search. In expressions involving Searchable fields, equality (==) means a match. A match in the context of a searchable field is not necessarily a full exact match, but rather means that the string contains the search text. Let's look at some examples.

Find Employees in Sales

So we have a Department string which is marked as Searchable in our Employee class. Notice how we've named our departments: they contain a region and a department type. If we wanted to find all employees in Sales, we could do so with:

    var salesPeople = employees.Where(x => x.Department == "Sales");
    await foreach (var employee in salesPeople)
    {
    Console.WriteLine($"{employee.Name} is in the {employee.Department} department");
    }

    This will produce:

    Bob is in the EMEA Sales department
    Alice is in the Partner Sales department
    Marcus is in the NA Sales department
    Susan is in the EMEA Sales department
    John is in the APAC Sales department

This is because they are all folks in departments whose names contain "Sales".

    If you wanted to search for everyone in a department in EMEA you could search with:

    var emeaFolks = employees.Where(x => x.Department == "EMEA");
    await foreach (var employee in emeaFolks)
    {
    Console.WriteLine($"{employee.Name} is in the {employee.Department} department");
    }

    Which of course would produce:

    Bob is in the EMEA Sales department
    Susan is in the EMEA Sales department
    Theresa is in the EMEA Ops department

    Sorting

    If a Searchable or Indexed field is marked as Sortable, or Aggregatable, you can order by that field using OrderBy predicates.

    var employeesBySales = employees.OrderBy(x=>x.Name);
    var employeesBySalesDescending = employees.OrderByDescending(x=>x.Name);
    - + \ No newline at end of file diff --git a/develop/dotnet/streams/blocking-reads/cs-redis/index.html b/develop/dotnet/streams/blocking-reads/cs-redis/index.html index 05adaadc4c..edcd90d0c3 100644 --- a/develop/dotnet/streams/blocking-reads/cs-redis/index.html +++ b/develop/dotnet/streams/blocking-reads/cs-redis/index.html @@ -4,7 +4,7 @@ Blocking Stream reads with CSRedis | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Blocking Stream reads with CSRedis

CSRedis is an MIT-licensed open source project which provides a straightforward interface for executing commands. CSRedis can be used effectively for performing blocking stream reads, with the one major downside that it does not support an async API for them.

    Start Redis

    Before we begin, we'll start up Redis. If you are developing locally, which we'll assume you are for the duration of this tutorial, you can start Redis with a simple docker command.

    docker run -p 6379:6379 redis

    Create the app

    We will build a simple console application for streaming telemetry using the library. To do so, use the dotnet new command:

    dotnet new console -n StreamsWithCSRedis

    Add the package to your app

Run the cd StreamsWithCSRedis command to change into the application's directory, then run the following to add the CSRedis package:

    dotnet add package CSRedisCore

    Create group

When we start up our app, the first thing we'll do is create our avg consumer group. To create this group, open up Program.cs and add the following:

    var cancellationTokenSource = new CancellationTokenSource();
    var token = cancellationTokenSource.Token;

    var client = new CSRedisClient("localhost");
    if (!client.Exists("stream") || client.XInfoStream("stream").groups == 0)
    {
    client.XGroupCreate("stream", "avg", "$", MkStream: true);
    }

    This code will create a cancellation token for the threads we'll spin up to do the writes/reads to the stream, create a client, check if our avg group already exists, and finally create the avg group if it doesn't.

    Write to the stream

Next, we'll write out to the stream. We'll call the stream stream, and send a temp and a time field to it every 2 seconds. We'll put this on its own thread; since this operation isn't actually 'blocking' in the Redis sense, it might be alright to spin it out as a task, but as the other two operations here are blocking, it's better to spin it off on its own thread as well. Add the following to your Program.cs file:

    var writeThread = new Thread(() =>
    {
    var writeClient = new CSRedisClient("localhost");
    var random = new Random();
    while (!token.IsCancellationRequested)
    {
    writeClient.XAdd("stream", new (string, string)[]{new ("temp", random.Next(50,65).ToString()), new ("time", DateTimeOffset.Now.ToUnixTimeSeconds().ToString())});
    Thread.Sleep(2000);
    }
    });

    Parsing read results

The next issue we'll need to address is parsing the read results from the XREAD and XREADGROUP commands. CSRedis generally returns replies as tuples, so we'll need a way to parse the result into something more usable. In this case, we'll parse the results into a dictionary. For the sake of brevity, we will keep everything in this project in Program.cs in the top-level method, so we'll declare a Func to handle the parsing. This function will pull the first message from the first stream and arrange the values returned into a dictionary. One thing to consider if you wanted to expand this further is that you could return a dictionary of dictionaries if you were pulling back multiple messages from multiple streams. That complexity is intentionally left out here.

    Func<(string key, (string id, string[] items)[] data), Dictionary<string,string>> parse = delegate((string key, (string id, string[] items)[] data) streamResult)
    {
    var message = streamResult.data.First().items;
    var result = new Dictionary<string, string>();
    for (var i = 0; i < message.Length; i += 2)
    {
    result.Add(message[i], message[i+1]);
    }

    return result;
    };

    Blocking XREAD

There are two primary types of 'read' methods: XREAD and XREADGROUP. (This is in addition to the various range methods, which are their own category and operate semantically differently from the read operations.) XREAD lets you read off a given stream, picking up the next item that hits the stream; you can do this with the special $ id. For our purposes here, we are going to block for five seconds, or until we get a response back from Redis, whichever comes first:

    var readThread = new Thread(() =>
    {
    var readClient = new CSRedisClient("localhost");
    while (!token.IsCancellationRequested)
    {
    var result = readClient.XRead(1, 5000, new (string key, string id)[] {new("stream", "$")});
    if (result != null)
    {
    var dictionary = parse(result[0]);
    Console.WriteLine($"Most recent message, time: {dictionary["time"]} temp: {dictionary["temp"]}");
    }
    }
    });

    Blocking XREADGROUP

Blocking XREADGROUP commands operate very similarly to XREAD. In this case, however, the creation of the group told us what id to start at, and by passing in the special > id we start at the next unread message in the group. Because we are reading out of a group, we'll also want to XACK any messages that we pull down. Also, since this is our average group, we'll maintain a running average of our stream's temperatures.

    var total = 0;
    var count = 0;
    var groupReadThread = new Thread(() =>
    {
    var groupReadClient = new CSRedisClient("localhost");
    var id = string.Empty;
    while (!token.IsCancellationRequested)
    {
    if (!string.IsNullOrEmpty(id))
    {
    client.XAck("stream", "avg", id);
    }
    var result =
    groupReadClient.XReadGroup("avg", "avg-1", 1, 5000, new (string key, string id)[] {new("stream", ">")});
    if (result != null)
    {
    id = result.First().data.First().id;
    var dictionary = parse(result[0]);
    if (dictionary.ContainsKey("temp"))
    {
    count++;
    total += int.Parse(dictionary["temp"]);
    double avg = (double) total / count;
    Console.WriteLine($"Most recent group message, time: {dictionary["time"]} temp: {dictionary["temp"]} avg: {avg:00.00}");
    }
    }
    }
    });

    Spin up threads

    The last thing we'll need to do is start up all the threads, set a cancellation timeout (so the app doesn't run forever), and join all the threads back together:

    readThread.Start();
    writeThread.Start();
    groupReadThread.Start();

    cancellationTokenSource.CancelAfter(TimeSpan.FromSeconds(10));

    readThread.Join();
    writeThread.Join();
    groupReadThread.Join();

    Run the app

Now that the app is written, all that's left to do is run it. You can do so by running dotnet run in your terminal.

    Resources:

    • The source for this tutorial is in GitHub
    • Redis University has an extensive course on Redis Streams where you can learn everything you need to know about them.
    • You can learn more about Redis Streams in the Streams Info article on redis.io
    - + \ No newline at end of file diff --git a/develop/dotnet/streams/blocking-reads/index.html b/develop/dotnet/streams/blocking-reads/index.html index b1f73452e7..ccd3cfac1f 100644 --- a/develop/dotnet/streams/blocking-reads/index.html +++ b/develop/dotnet/streams/blocking-reads/index.html @@ -4,7 +4,7 @@ Blocking Stream Reads | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Blocking Stream Reads

    Redis Streams can be used to build a message bus for our applications. The ability of multiple readers to consume messages from a Redis Stream in a consumer group makes Redis Streams ideal for a variety of use cases where you want the assurance of message delivery and where you have high volumes of data you want to distribute across multiple consumers.

    One of the great things about Redis Streams is that you can reduce the number of requests you need to make to Redis by having consumers use blocking requests and wait for new messages to come into the stream. In terms of commands, this would look something like this:

    XREADGROUP GROUP average avg1 COUNT 1 BLOCK 1000 STREAMS numbers >

    Or, for a simple XREAD, you can wait for the next message to come in:

    127.0.0.1:6379> XREAD BLOCK 1000 STREAMS numbers $

The main .NET Redis client, StackExchange.Redis, does not support this particular feature. The reason for this lack of support is architectural: the StackExchange.Redis client centers all commands to Redis around a single connection, so blocking that connection for a single client would block all other requests to Redis. If we want to do blocking stream reads with Redis in .NET, we'll need to use different clients. Contained in this section are tutorials for doing so with both ServiceStack.Redis and CSRedis.

    - + \ No newline at end of file diff --git a/develop/dotnet/streams/blocking-reads/service-stack/index.html b/develop/dotnet/streams/blocking-reads/service-stack/index.html index 39ad14ff2f..366dd6b024 100644 --- a/develop/dotnet/streams/blocking-reads/service-stack/index.html +++ b/develop/dotnet/streams/blocking-reads/service-stack/index.html @@ -4,7 +4,7 @@ How to handle blocking stream reads with ServiceStack.Redis | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    How to handle blocking stream reads with ServiceStack.Redis

ServiceStack.Redis is part of the ServiceStack suite; it has some restrictions when used for commercial purposes - see their license.

    Start Redis

    If you're developing locally (which is what we will assume for the balance of this tutorial), you can start Redis fairly quickly with docker:

    docker run -p 6379:6379 redis

    Create the app

    We will build a simple console application for streaming telemetry using the library. To do so, use the dotnet new command:

    dotnet new console -n StreamsWithServiceStack

    Add the package to your app

    You can add this package to your app with:

    dotnet add package ServiceStack.Redis

    Initialize the client manager

To initialize a client with ServiceStack, you'll need to create a RedisClientManager. Add the following to Program.cs:

    var manager = new BasicRedisClientManager("localhost");

    Add items to streams

Redis Streams are not yet fully supported by ServiceStack.Redis; however, you can run raw commands easily with the CustomAsync method. So let's create a new class called Producer.cs and add the following to it:

    public static class Producer
    {
    public static async Task Produce(BasicRedisClientManager manager, CancellationToken token)
    {
    var client = await manager.GetClientAsync(token);
    var random = new Random();
    while (!token.IsCancellationRequested)
    {
    await client.CustomAsync("XADD", "telemetry", "*", "temp",random.Next(50,65), "time", DateTimeOffset.Now.ToUnixTimeSeconds());
    await Task.Delay(10000, token);
    }
    }
    }

    This code will send new telemetry every 10 seconds to the telemetry stream, with a temp record and a time record.

    Reading messages

As mentioned earlier, ServiceStack does not have native support for the Streams API, so we need to do a bit of work after retrieving a record from a stream. However, this isn't a complex operation, since the result is a predictable set of nested arrays: an array of the streams requested, then an array of messages retrieved from each stream, then the message itself split between its id and its attributes, and finally the field-value pairs within a message. This looks something like this:

    127.0.0.1:6379> XREAD COUNT 1 BLOCK 20000 STREAMS telemetry $
    1) 1) "telemetry"
    2) 1) 1) "1642857504469-0"
    2) 1) "temp"
    2) "57"
    3) "time"
    4) "1642857504"

    This data structure is pretty predictable to parse, so we'll add a little parsing method. First, Create Consumer.cs and add the following to it:

    using ServiceStack.Redis;

    namespace StreamsWithServicestack;

    public static class Consumer
    {
    public static IDictionary<string, string> ParseStreamResult(RedisText text, out string id)
    {
    var result = new Dictionary<string, string>();

    var fieldValPairs = text.Children[0].Children[1].Children[0].Children[1].Children;
    id = text.Children[0].Children[1].Children[0].Children[0].Text;
    for (var i = 0; i < fieldValPairs.Count; i += 2)
    {
    result.Add(fieldValPairs[i].Text, fieldValPairs[i+1].Text);
    }

    return result;
    }
    }

ParseStreamResult will yield the first message from the first stream of an XREAD or XREADGROUP. This isn't a fully generalized solution, but it will serve our purposes here.

    Reading a stream outside a group with XREAD

    To read the next message in a stream, which is necessarily a blocking operation, you will use the XREAD command with the BLOCK option and the special $ id. Then, in the Consumer class, add the following, which will read off the stream in a continuous loop, blocking for 20 seconds at each request.

    public static async Task Consume(IRedisClientsManagerAsync manager, CancellationToken token)
    {
    var client = await manager.GetClientAsync(token);
    while (!token.IsCancellationRequested)
    {
    string id;
    var result = await client.CustomAsync("XREAD", "COUNT", 1, "BLOCK", 20000, "STREAMS", "telemetry", "$");
    var fvp = ParseStreamResult(result, out id);
    Console.WriteLine($"read: result {id} - temp: {fvp["temp"]} time: {fvp["time"]}");
    }
    }

    Reading with consumer groups

    Reading messages in a consumer group can be helpful in cases where you have a common task that you want to distribute across many consumers in a high-throughput environment. It's a two-step process:

    1. Read the stream
    2. Acknowledge receipt of the message

This task can be done by running an XREADGROUP and an XACK back to back. The XREADGROUP will take, in addition to the parameters we discussed for XREAD, the GROUP name and the consumer's name, and instead of taking the special $ id, it takes the special > id, which makes it take the next unassigned id for the group. We'll then extract the information from it, update our average, and then acknowledge receipt of the message.

    public static async Task ConsumeFromGroup(IRedisClientsManagerAsync manager, CancellationToken token)
    {
    var client = await manager.GetClientAsync(token);
    var total = 0;
    var num = 0;
    while (!token.IsCancellationRequested)
    {
    string id;
    var result = await client.CustomAsync("XREADGROUP", "GROUP", "avg", "avg-1", "COUNT", "1", "BLOCK",
    20000, "STREAMS", "telemetry", ">");
    var fvp = ParseStreamResult(result, out id);
    total += int.Parse(fvp["temp"]);
    num++;
    Console.WriteLine(
    $"Group-read: result {id} - temp: {fvp["temp"]} time: {fvp["time"]}, current average: {total / num}");
    await client.CustomAsync("XACK", "telemetry", "avg", id);
    }
    }

    Create the group and start the tasks

    The final bit we need is to create the group and start up all the tasks. We'll use the XGROUP command with the MKSTREAM option to create the group. We'll then start up all the tasks we need for our producer and consumers, and we'll await everything. Add the following to your Program.cs file:

    using ServiceStack.Redis;
    using StreamsWithServicestack;

    var manager = new BasicRedisClientManager("localhost");
    var asyncClient = await manager.GetClientAsync();

    var tokenSource = new CancellationTokenSource();
    var token = tokenSource.Token;

    try
    {
    await asyncClient.CustomAsync("XGROUP", "CREATE", "telemetry", "avg", "0-0", "MKSTREAM");
    }
    catch (Exception ex)
    {
    Console.WriteLine(ex);
    }

    var writeTask = Producer.Produce(manager, token);
    var readTask = Consumer.Consume(manager, token);
    var groupReadTask = Consumer.ConsumeFromGroup(manager, token);

    await Task.WhenAll(writeTask, readTask, groupReadTask);

    Run the app

    All that's left to do is to run the app, and you'll see a continuous stream of messages coming in every 10 seconds. You can run the app by running:

    dotnet run

    Resources:

    • The source for this tutorial is in GitHub
    • Redis University has an extensive course on Redis Streams where you can learn everything you need to know about them.
    • You can learn more about Redis Streams in the Streams Info article on redis.io
    - + \ No newline at end of file diff --git a/develop/dotnet/streams/stream-basics/index.html b/develop/dotnet/streams/stream-basics/index.html index 514c94e9c7..4d6ce98283 100644 --- a/develop/dotnet/streams/stream-basics/index.html +++ b/develop/dotnet/streams/stream-basics/index.html @@ -4,7 +4,7 @@ How to use Redis Streams with .NET | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    How to use Redis Streams with .NET

Redis Streams are a powerful data structure that allows you to use Redis as a sort of message bus to transport messages between different application components. The way streams operate in Redis is very fast and memory efficient. This article will not go over the minutiae of every command available for Redis Streams; rather, it aims to provide a high-level tutorial for how you can use Redis Streams with .NET.

    Start Redis

    The first thing we'll want to do is start Redis. If you already have an instance of Redis, you can ignore this bit and adjust the connection step below to connect to your instance of Redis. Redis is straightforward to get up and running; you can do so using docker:

    docker run -p 6379:6379 redis

    Create your .NET app

    For simplicity's sake, we'll stick to a simple console app, from which we'll spin out a few tasks that will perform the various add/read operations that we'll use. Create a new console app with the dotnet new command:

    dotnet new console -n RedisStreamsBasics

    Add StackExchange.Redis package

Next, we'll need to add the client library that we will use to interface with Redis. StackExchange.Redis is the canonical package, thus we will use it in this example. First cd into the RedisStreamsBasics directory and then run the dotnet add package command:

    cd RedisStreamsBasics
    dotnet add package StackExchange.Redis

    Initialize the Multiplexer

StackExchange.Redis centers more or less around the ConnectionMultiplexer, which handles the routing and queuing of all commands that you send to Redis. Our first code-related step, then, is to initialize the Multiplexer. Creating the Multiplexer is pretty straightforward; open up Program.cs in your IDE and add the following to it:

    using StackExchange.Redis;

    var tokenSource = new CancellationTokenSource();
    var token = tokenSource.Token;

    var muxer = ConnectionMultiplexer.Connect("localhost");
    var db = muxer.GetDatabase();

    const string streamName = "telemetry";
    const string groupName = "avg";

We're also initializing a CancellationToken and CancellationTokenSource here; we'll set these up towards the end of this tutorial so that this application does not run endlessly. We're also creating a couple of constants, the stream's name and the group's name, that we'll use later, and we're grabbing an IDatabase object from the Multiplexer to use.

    Create the consumer group

A Consumer Group in a Redis Stream allows you to group a bunch of consumers to pull messages off the stream for the group. This functionality is excellent when you have high-throughput workloads and you want to scale out the workers who will process your messages. To use a consumer group, you first need to create it. To create a consumer group, you'll use the StreamCreateConsumerGroupAsync method, passing in the streamName and groupName, as well as the starting id; we'll use the 0-0 id (the lowest id allowable in Redis Streams). Before invoking this call, it's wise to validate that the group doesn't exist yet, as creating an already existing consumer group will result in an error. So first, we'll check if the stream exists; if it doesn't, we can safely create the group. If the stream does exist, we'll use the stream info method to see whether any existing group matches the avg groupName, and create the group only if none does.

    if (!(await db.KeyExistsAsync(streamName)) ||
    (await db.StreamGroupInfoAsync(streamName)).All(x=>x.Name!=groupName))
    {
    await db.StreamCreateConsumerGroupAsync(streamName, groupName, "0-0", true);
    }

    Spin up producer task

    Three tasks will run in parallel for our program. The first is the producerTask. This Task will write a random number between 50 and 65 as the temp and send the current time as the time.

    var producerTask = Task.Run(async () =>
    {
    var random = new Random();
    while (!token.IsCancellationRequested)
    {
    await db.StreamAddAsync(streamName,
    new NameValueEntry[]
    {new("temp", random.Next(50, 65)), new NameValueEntry("time", DateTimeOffset.Now.ToUnixTimeSeconds())});
    await Task.Delay(2000);
    }
    });

    Parser helper function for reading results

    The results retrieved from Redis will be in a reasonably readable form; all the same, it is helpful for our purposes to parse the result into a dictionary. To do this, add an inline function to handle the parsing:

    Dictionary<string, string> ParseResult(StreamEntry entry) => entry.Values.ToDictionary(x => x.Name.ToString(), x => x.Value.ToString());
    note

Stream messages enforce no requirement that field names be unique. We use a dictionary for clarity's sake in this example, but you will need to ensure that you are not passing in multiple fields with the same name to avoid issues with this dictionary approach.

    Spin up most recent element task

Next, we'll need to spin up a task to read the most recent element off of the stream. To do this, we'll use the StreamRangeAsync method, passing in two special ids: -, which means the lowest id, and +, which means the highest id. Running this command will result in some duplication. This redundancy is necessary because the StackExchange.Redis library does not support blocking stream reads and does not support the special $ character for stream reads. Overcoming this behavior is explored in depth in the Blocking Reads tutorial. For this tutorial, you can manage these most-recent reads with the following code:

    var readTask = Task.Run(async () =>
    {
    while (!token.IsCancellationRequested)
    {
    var result = await db.StreamRangeAsync(streamName, "-", "+", 1, Order.Descending);
    if (result.Any())
    {
    var dict = ParseResult(result.First());
    Console.WriteLine($"Read result: temp {dict["temp"]} time: {dict["time"]}");
    }

    await Task.Delay(1000);
    }
    });

    Spin up consumer group read Task

The final Task we'll spin up is the read task for the consumer group. Due to the nature of consumer groups, you can spin this Task up multiple times to scale out the processing as needed. It's the responsibility of Redis to keep track of which messages it has distributed to the consumer group, as well as which messages consumers have acknowledged. Acknowledging messages adds a layer of validation that all messages were processed; if something happens to one of your processing tasks or processes, you can more easily know which messages you missed.

    We'll check to see if we have a recent message-id to handle all of this. If we do, we will send an acknowledgment to the server that the id was processed. Then we will grab the next message to be processed from the stream, pull out the data and the id and print out the result.

double count = default;
double total = default;

var consumerGroupReadTask = Task.Run(async () =>
{
    string id = string.Empty;
    while (!token.IsCancellationRequested)
    {
        if (!string.IsNullOrEmpty(id))
        {
            await db.StreamAcknowledgeAsync(streamName, groupName, id);
            id = string.Empty;
        }
        var result = await db.StreamReadGroupAsync(streamName, groupName, "avg-1", ">", 1);
        if (result.Any())
        {
            id = result.First().Id;
            count++;
            var dict = ParseResult(result.First());
            total += double.Parse(dict["temp"]);
            Console.WriteLine($"Group read result: temp: {dict["temp"]}, time: {dict["time"]}, current average: {total / count:00.00}");
        }
        await Task.Delay(1000);
    }
});

    Set timeout and await tasks

    Finally, we need to set the timeout and await the tasks at the end of our program:

    tokenSource.CancelAfter(TimeSpan.FromSeconds(20));
    await Task.WhenAll(producerTask, readTask, consumerGroupReadTask);

    Run the app

    You can now run this app with the dotnet run command.
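From the project's directory, run:

dotnet run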

    Resources:

    • The source for this tutorial is in GitHub
    • Redis University has an extensive course on Redis Streams where you can learn everything you need to know about them.
    • You can learn more about Redis Streams in the Streams Info article on redis.io
Golang Redis Client

Do check the Redis Cache Library for Golang.

    Step 1. Run a Redis server

Redis is an open source, in-memory, key-value data store most commonly used as a primary database, cache, message broker, and queue. Redis delivers sub-millisecond response times, enabling fast and powerful real-time applications in industries such as gaming, fintech, ad-tech, social media, healthcare, and IoT. You can run a Redis database directly on your local macOS system or in a container. If you have Docker installed on your system, type the following command:

     docker run -d -p 6379:6379 redis/redis-stack

You can connect to the Redis server using the redis-cli command like this:

     redis-cli

    The above command will make a connection to the Redis server. It will then present a prompt that allows you to run Redis commands.
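For example, you can verify the connection with the PING command, which should answer with PONG:

127.0.0.1:6379> PING
PONG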

    Step 2. Initialise the Go Module

In order to connect to the Redis instance and return some data from it, you first need to initialize a Go module as shown:

     go mod init github.com/my/repo

    Step 3. Install redis/v8

     go get github.com/go-redis/redis/v8

    Step 4. Create a main.go file

Let us create a main.go file and write the following code to check the connection to your Redis instance:

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	fmt.Println("Testing Golang Redis")

	// Connect to a locally running Redis instance.
	client := redis.NewClient(&redis.Options{
		Addr:     "localhost:6379",
		Password: "", // no password set
		DB:       0,  // use the default DB
	})

	// go-redis v8 commands take a context.
	ctx := context.Background()

	pong, err := client.Ping(ctx).Result()
	fmt.Println(pong, err)
}

Step 5. Compile and run the application

     go run main.go

By now, the Go application should successfully connect to the Redis instance and receive a successful "PONG" response.
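If the connection succeeds, the program's output should look like the following (the <nil> is the error value, which is empty on success):

Testing Golang Redis
PONG <nil>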

    Redis Launchpad

    Redis Launchpad is like an “App Store” for Redis sample apps. You can easily find apps for your preferred frameworks and languages. Check out a few of these apps below, or click here to access the complete list.

    Rate-Limiting app in Go


    Rate Limiting app built in Go

    Leaderboard app in Go


    How to implement leaderboard app in Go

    Technical Articles & Whitepapers

• Redis and Golang: Designed to Improve Performance
• Redisqueue - A producer and consumer of a queue that uses Redis streams
• A High Performance Recommendation Engine with Redis and Go


    Getting Started with Netlify

    Step 1. Preparing the local environment


    Develop your application using programming languages

    Find documentation, sample code and tools to develop with your favorite language.

    Getting Started with Java and Redis
    Getting Started with Python and Redis
    Getting Started with Node.js and Redis
    Getting Started with Redis in .NET
    Getting Started with Golang and Redis
    Getting Started with Ruby and Redis
    Getting Started with PHP and Redis
Java and Redis

See how to use Redis and Spring to build a product catalog with streams, hashes and Search.

Redis Stream in Action (Spring) See how to use Spring to create multiple producers and consumers with Redis Streams

    Rate Limiting with Vert.x See how to use Redis Sorted Set with Vert.x to build a rate limiting service.

    Redis University

    Redis for Java Developers

    Redis for Java Developers teaches you how to build robust Redis client applications in Java using the Jedis client library. The course focuses on writing idiomatic Java applications with the Jedis API, describing language-specific patterns for managing Redis database connections, handling errors, and using standard classes from the JDK. The course material uses the Jedis API directly with no additional frameworks. As such, the course is appropriate for all Java developers, and it clearly illustrates the principles involved in writing applications with Redis.


    Java and Redis

    Explore the many different ways to build Java applications powered by Redis:

    Java and Redis
    Data-Driven Applications with Spring Boot and Redis
Getting Started with Spring Data Redis

The project includes milestones in the form of Git branches that can help you pick up the project at any specific lesson.

    Prerequisites

    To get the most from this course, you'll need a machine that can run the application and the Redis server, which is provided as a Docker container. You'll also need the following installed on your machine:

    Let's Learn Together

    We're here to support your learning through a dedicated Discord channel that I'll be monitoring along with other teaching assistants. Join us on the Redis Discord server.

    Course Contents

    Create the skeleton for the course’s Spring Boot application.
    Introducing Spring Data Redis.
    Object Mapping & Redis Repositories.
    User/Roles & Secondary Indexes.
    Books, Categories & The Catalog.
    Domain Models with Redis JSON.
    Search with Redis Search.
    Recommendations with RedisGraph.
    Caching REST Services with Redis.
Spring and Redis: Up and Running

Redis's speed comes from the fact that it stores and serves all data from RAM rather than disk. Redis is durable, so your data will be persisted, but all reads will be from a copy of the data held in RAM. This makes Redis an excellent choice for applications that require real-time data access.

    External Resources

Here are some resources that we think will be useful to you as you discover Redis:

    • redis.io - the official website of open source Redis.
    • Redis Enterprise Cloud - a fully managed cloud service from Redis with a free plan for getting started.
    • The official Redis Docker image.
    • For a comprehensive introduction to Redis, we recommend taking a look at the RU101: Introduction to Redis Data Structures course at Redis University. In this free online course, you’ll learn about the data structures in Redis, and you’ll see how to practically apply them in the real world.
Introducing Spring Data Redis

We can now write strings to Redis through our REST controller. Next, let’s add a corresponding GET method to our controller to read string values:

@GetMapping("/strings/{key}")
public Map.Entry<String, String> getString(@PathVariable("key") String key) {
    String value = template.opsForValue().get(STRING_KEY_PREFIX + key);

    if (value == null) {
        throw new ResponseStatusException(HttpStatus.NOT_FOUND, "key not found");
    }

    return new SimpleEntry<String, String>(key, value);
}

    With imports:

    import java.util.AbstractMap.SimpleEntry;
    import org.springframework.web.bind.annotation.GetMapping;
    import org.springframework.web.bind.annotation.PathVariable;
    import org.springframework.web.server.ResponseStatusException;

    We can now issue a GET request to retrieve String keys:

    $ curl --location --request GET 'http://localhost:8080/api/redis/strings/database:redis:creator'
    {"database:redis:creator":"Salvatore Sanfilippo"}

    On the Redis CLI monitor you should see:

    1617347871.901585 [0 172.19.0.1:58284] "GET" "redi2read:strings:database:redis:creator"

    Note that in order to return an error on a key not found, we have to check the result for null and throw an appropriate exception.

    {
    "timestamp": "2021-04-02T07:45:10.303+00:00",
    "status": 404,
    "error": "Not Found",
    "trace": "org.springframework.web.server.ResponseStatusException: 404...\n",
    "message": "key not found",
    "path": "/api/redis/strings/database:neo4j:creator"
    }

Keep in mind that this is a “development time” exception, appropriate to be shown on an error page meant for developers. In production, we would likely intercept this exception and create an API-appropriate response (probably just the status and error fields above).

Object Mapping & Redis Repositories

On server start we should now see, once, the output of our database seeding:

    2021-04-02 19:28:25.367  INFO 94971 --- [  restartedMain] c.r.edu.redi2read.Redi2readApplication   : Started Redi2readApplication in 2.146 seconds (JVM running for 2.544)
    2021-04-02 19:28:25.654 INFO 94971 --- [ restartedMain] c.r.edu.redi2read.boot.CreateRoles : >>>> Created admin and customer roles...

Let’s use the Redis CLI to explore how the Roles were stored. We’ll use the KEYS command, passing the Role’s fully qualified class name followed by a wildcard:

    127.0.0.1:6379> KEYS com.redislabs.edu.redi2read.models.Role*
    1) "com.redislabs.edu.redi2read.models.Role:c4219654-0b79-4ee6-b928-cb75909c4464"
    2) "com.redislabs.edu.redi2read.models.Role:9d383baf-35a0-4d20-8296-eedc4bea134a"
    3) "com.redislabs.edu.redi2read.models.Role"

    The first two values are Hashes, actual instances of the Role class. The string after the : is the primary key of the individual Role. Let’s inspect one of those hashes:

    127.0.0.1:6379> TYPE "com.redislabs.edu.redi2read.models.Role:c4219654-0b79-4ee6-b928-cb75909c4464"
    hash
    127.0.0.1:6379> HGETALL "com.redislabs.edu.redi2read.models.Role:c4219654-0b79-4ee6-b928-cb75909c4464"
    1) "_class"
    2) "com.redislabs.edu.redi2read.models.Role"
    3) "id"
    4) "c4219654-0b79-4ee6-b928-cb75909c4464"
    5) "name"
    6) "admin"

Using the TYPE command returns, as expected, that the value under the key is a Redis Hash. We use HGETALL to “get all” the values in the Hash. The _class entry is a metadata field that records the class of the object stored in the Hash. Now let’s inspect the third value in the KEYS list:

    127.0.0.1:6379> TYPE "com.redislabs.edu.redi2read.models.Role"
    set
    127.0.0.1:6379> SMEMBERS "com.redislabs.edu.redi2read.models.Role"
    1) "9d383baf-35a0-4d20-8296-eedc4bea134a"
    2) "c4219654-0b79-4ee6-b928-cb75909c4464"

    The Redis Set under the mapped class name is used to keep the primary keys maintained for a given class.
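For example, counting the members of that Set with the SCARD command returns the number of Role instances stored; given the two roles seeded above:

127.0.0.1:6379> SCARD "com.redislabs.edu.redi2read.models.Role"
(integer) 2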

User/Roles & Secondary Indexes

We wrap the result in a list to match the result type of the method. We use Optional to handle a null result from the finder. And don’t forget your imports:

    import java.util.Collections;
    import java.util.List;
    import java.util.Optional;
    import org.springframework.web.bind.annotation.RequestParam;

    Invoking the endpoint with curl:

    curl --location --request GET 'http://localhost:8080/api/users/?email=donald.gibson@example.com'

    Returns the expected result:

    [
    {
    "id": "-1266125356844480724",
    "name": "Donald Gibson",
    "email": "donald.gibson@example.com",
    "roles": [
    {
    "id": "a9f9609f-c173-4f48-a82d-ca88b0d62d0b",
    "name": "customer"
    }
    ]
    }
    ]
Books, Categories & The Catalog

In the file src/main/resources/application.properties add the following values:

    app.numberOfRatings=5000
    app.ratingStars=5

    Implementing Pagination with All Books

Pagination is helpful when we have a large dataset and want to present it to the user in smaller chunks. As we learned earlier in the lesson, the BookRepository extends PagingAndSortingRepository, which is built on top of CrudRepository. In this section, we will refactor the BookController's all method to work with the pagination features of the PagingAndSortingRepository. Replace the previously created all method with the following contents:

@GetMapping
public ResponseEntity<Map<String, Object>> all( //
        @RequestParam(defaultValue = "0") Integer page, //
        @RequestParam(defaultValue = "10") Integer size //
) {
    Pageable paging = PageRequest.of(page, size);
    Page<Book> pagedResult = bookRepository.findAll(paging);
    List<Book> books = pagedResult.hasContent() ? pagedResult.getContent() : Collections.emptyList();

    Map<String, Object> response = new HashMap<>();
    response.put("books", books);
    response.put("page", pagedResult.getNumber());
    response.put("pages", pagedResult.getTotalPages());
    response.put("total", pagedResult.getTotalElements());

    return new ResponseEntity<>(response, new HttpHeaders(), HttpStatus.OK);
}

    Let’s break down the refactoring:

    • We want to control the method return value so we’ll use a ResponseEntity, which is an extension of HttpEntity and gives us control over the HTTP status code, headers, and body.
    • For the return type, we wrap a Map<String,Object> to return the collection of books as well as pagination data.
    • We’ve added two request parameters (HTTP query params) of type integer for the page number being retrieved and the size of the page. The page number defaults to 0 and the size of the page defaults to 10.
    • In the body of the method, we use the Pageable and PageRequest abstractions to construct the paging request.
    • We get a Page<Book> result by invoking the findAll method, passing the Pageable paging request.
    • If the returned page contains any items, we add them to the response object. Otherwise, we add an empty list.
    • The response is constructed by instantiating a Map and adding the books, current page, total number of pages, and total number of books.
    • Finally we package the response map into a ResponseEntity.

    Let’s fire up a pagination request with curl as shown next:

    curl --location --request GET 'http://localhost:8080/api/books/?size=25&page=2'

    Passing a page size of 25 and requesting page number 2, we get the following:

    {
    "total": 2403,
    "books": [
    {
    "id": "1786469960",
    "title": "Data Visualization with D3 4.x Cookbook",
    "subtitle": null,
    "description": "Discover over 65 recipes to help you create breathtaking data visualizations using the latest features of D3...",
    "language": "en",
    "pageCount": 370,
    "thumbnail": "http://books.google.com/books/content?id=DVQoDwAAQBAJ&printsec=frontcover&img=1&zoom=1&edge=curl&source=gbs_api",
    "price": 22.39,
    "currency": "USD",
    "infoLink": "https://play.google.com/store/books/details?id=DVQoDwAAQBAJ&source=gbs_api",
    "authors": [
    "Nick Zhu"
    ],
    "categories": [
    {
    "id": "f2ada1e2-7c18-4d90-bfe7-e321b650c0a3",
    "name": "redis"
    }
    ]
    },
    {
    "id": "111871735X",
    "title": "Android Programming",
    "subtitle": "Pushing the Limits",
    "description": "Unleash the power of the Android OS and build the kinds ofbrilliant, innovative apps users love to use ...",
    "language": "en",
    "pageCount": 432,
    "thumbnail": "http://books.google.com/books/content?id=SUWPAQAAQBAJ&printsec=frontcover&img=1&zoom=1&edge=curl&source=gbs_api",
    "price": 30.0,
    "currency": "USD",
    "infoLink": "https://play.google.com/store/books/details?id=SUWPAQAAQBAJ&source=gbs_api",
    "authors": [
    "Erik Hellman"
    ],
    "categories": [
    {
    "id": "47d9a769-bbc2-4068-b27f-2b800bec1565",
    "name": "kotlin"
    }
    ]
    },
Domain Models with Redis

We search for the cart by ID.

    • If we find the cart, we search for the index of the item to be removed in the array of cart items.
    • If we find the item, we use the JSON.ARRPOP command to remove the item by its index at the JSONPath expression “.cartItems”.
public void removeFromCart(String id, String isbn) {
    Optional<Cart> cartFinder = cartRepository.findById(id);
    if (cartFinder.isPresent()) {
        Cart cart = cartFinder.get();
        String cartKey = CartRepository.getKey(cart.getId());
        List<CartItem> cartItems = new ArrayList<CartItem>(cart.getCartItems());
        OptionalLong cartItemIndex = LongStream.range(0, cartItems.size())
                .filter(i -> cartItems.get((int) i).getIsbn().equals(isbn))
                .findFirst();
        if (cartItemIndex.isPresent()) {
            redisJson.arrPop(cartKey, CartItem.class, cartItemsPath, cartItemIndex.getAsLong());
        }
    }
}

    Generating Random Carts

    We now have all the pieces in place to create a CommandLineRunner that can generate random carts for our users. As done previously, we will set the number of carts generated using an application property. To do so, add the following to the file src/main/resources/application.properties:

    app.numberOfCarts=2500

    The CreateCarts CommandLineRunner is shown below. Add it to the boot package.

package com.redislabs.edu.redi2read.boot;

import java.util.HashSet;
import java.util.Random;
import java.util.Set;
import java.util.stream.IntStream;

import com.redislabs.edu.redi2read.models.Book;
import com.redislabs.edu.redi2read.models.Cart;
import com.redislabs.edu.redi2read.models.CartItem;
import com.redislabs.edu.redi2read.models.User;
import com.redislabs.edu.redi2read.repositories.BookRepository;
import com.redislabs.edu.redi2read.repositories.CartRepository;
import com.redislabs.edu.redi2read.services.CartService;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.CommandLineRunner;
import org.springframework.core.annotation.Order;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.stereotype.Component;

import lombok.extern.slf4j.Slf4j;

@Component
@Order(5)
@Slf4j
public class CreateCarts implements CommandLineRunner {

    @Autowired
    private RedisTemplate<String, String> redisTemplate;

    @Autowired
    CartRepository cartRepository;

    @Autowired
    BookRepository bookRepository;

    @Autowired
    CartService cartService;

    @Value("${app.numberOfCarts}")
    private Integer numberOfCarts;

    @Override
    public void run(String... args) throws Exception {
        if (cartRepository.count() == 0) {
            Random random = new Random();

            // loops for the number of carts to create
            IntStream.range(0, numberOfCarts).forEach(n -> {
                // get a random user
                String userId = redisTemplate.opsForSet()//
                        .randomMember(User.class.getName());

                // make a cart for the user
                Cart cart = Cart.builder()//
                        .userId(userId) //
                        .build();

                // get between 1 and 7 books
                Set<Book> books = getRandomBooks(bookRepository, 7);

                // add to cart
                cart.setCartItems(getCartItemsForBooks(books));

                // save the cart
                cartRepository.save(cart);

                // randomly checkout carts
                if (random.nextBoolean()) {
                    cartService.checkout(cart.getId());
                }
            });

            log.info(">>>> Created Carts...");
        }
    }

    private Set<Book> getRandomBooks(BookRepository bookRepository, int max) {
        Random random = new Random();
        int howMany = random.nextInt(max) + 1;
        Set<Book> books = new HashSet<Book>();
        IntStream.range(1, howMany).forEach(n -> {
            String randomBookId = redisTemplate.opsForSet().randomMember(Book.class.getName());
            books.add(bookRepository.findById(randomBookId).get());
        });

        return books;
    }

    private Set<CartItem> getCartItemsForBooks(Set<Book> books) {
        Set<CartItem> items = new HashSet<CartItem>();
        books.forEach(book -> {
            CartItem item = CartItem.builder()//
                    .isbn(book.getId()) //
                    .price(book.getPrice()) //
                    .quantity(1L) //
                    .build();
            items.add(item);
        });

        return items;
    }
}

    Let’s break down the CreateCarts class:

    • As with other CommandLineRunners, we check that there are no carts created.
    • For each cart to be created, we
    • Retrieve a random user.
    • Create a cart for the user.
    • Retrieve between 1 and 7 books.
    • Add the cart items to the cart for the retrieved books.
    • Randomly “checkout” the cart.

    There are two private utility methods at the bottom of the class to get a random number of books and to create cart items from a set of books. Upon server start (after some CPU cycles) you should see:

    2021-04-04 14:58:08.737  INFO 31459 --- [  restartedMain] c.r.edu.redi2read.boot.CreateCarts       : >>>> Created Carts...

    We can now use the Redis CLI to get a random cart key from the cart set, check the type of one of the keys (ReJSON-RL) and use the JSON.GET command to retrieve the JSON payload:

    127.0.0.1:6379> SRANDMEMBER "com.redislabs.edu.redi2read.models.Cart"
    "com.redislabs.edu.redi2read.models.Cart:dcd6a6c3-59d6-43b4-8750-553d159cdeb8"
    127.0.0.1:6379> TYPE "com.redislabs.edu.redi2read.models.Cart:dcd6a6c3-59d6-43b4-8750-553d159cdeb8"
    ReJSON-RL
    127.0.0.1:6379> JSON.GET "com.redislabs.edu.redi2read.models.Cart:dcd6a6c3-59d6-43b4-8750-553d159cdeb8"
    "{\"id\":\"dcd6a6c3-59d6-43b4-8750-553d159cdeb8\",\"userId\":\"-3356969291827598172\",\"cartItems\":[{\"isbn\":\"1784391093\",\"price\":17.190000000000001,\"quantity\":1},{\"isbn\":\"3662433524\",\"price\":59.990000000000002,\"quantity\":1}]}"

    The Cart Controller

    The CartController is mostly a pass-through to the CartService (as controllers are intended to be).

package com.redislabs.edu.redi2read.controllers;

import com.redislabs.edu.redi2read.models.Cart;
import com.redislabs.edu.redi2read.models.CartItem;
import com.redislabs.edu.redi2read.services.CartService;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.DeleteMapping;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

@RestController
@RequestMapping("/api/carts")
public class CartController {

    @Autowired
    private CartService cartService;

    @GetMapping("/{id}")
    public Cart get(@PathVariable("id") String id) {
        return cartService.get(id);
    }

    @PostMapping("/{id}")
    public void addToCart(@PathVariable("id") String id, @RequestBody CartItem item) {
        cartService.addToCart(id, item);
    }

    @DeleteMapping("/{id}")
    public void removeFromCart(@PathVariable("id") String id, @RequestBody String isbn) {
        cartService.removeFromCart(id, isbn);
    }

    @PostMapping("/{id}/checkout")
    public void checkout(@PathVariable("id") String id) {
        cartService.checkout(id);
    }
}

    Let’s use curl to request a cart by its ID:

    curl --location --request GET 'http://localhost:8080/api/carts/dcd6a6c3-59d6-43b4-8750-553d159cdeb8'

    Which should return a payload like:

    {
    "id": "dcd6a6c3-59d6-43b4-8750-553d159cdeb8",
    "userId": "-3356969291827598172",
    "cartItems": [
    {
    "isbn": "1784391093",
    "price": 17.19,
    "quantity": 1
    },
    {
    "isbn": "3662433524",
    "price": 59.99,
    "quantity": 1
    }
    ],
    "total": 77.18
    }
Search with Redis

Unlike search indexes, which RediSearch maintains automatically, you maintain suggestion dictionaries manually using FT.SUGADD and FT.SUGDEL. Add the property for the name of the auto-complete dictionary to src/main/resources/application.properties:

    app.autoCompleteKey=author-autocomplete

    Add the file src/main/java/com/redislabs/edu/redi2read/boot/CreateAuthorNameSuggestions.java with the following contents:

package com.redislabs.edu.redi2read.boot;

import com.redislabs.edu.redi2read.repositories.BookRepository;
import com.redislabs.lettusearch.RediSearchCommands;
import com.redislabs.lettusearch.StatefulRediSearchConnection;
import com.redislabs.lettusearch.Suggestion;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.CommandLineRunner;
import org.springframework.core.annotation.Order;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.stereotype.Component;

import lombok.extern.slf4j.Slf4j;

@Component
@Order(7)
@Slf4j
public class CreateAuthorNameSuggestions implements CommandLineRunner {

    @Autowired
    private RedisTemplate<String, String> redisTemplate;

    @Autowired
    private BookRepository bookRepository;

    @Autowired
    private StatefulRediSearchConnection<String, String> searchConnection;

    @Value("${app.autoCompleteKey}")
    private String autoCompleteKey;

    @Override
    public void run(String... args) throws Exception {
        if (!redisTemplate.hasKey(autoCompleteKey)) {
            RediSearchCommands<String, String> commands = searchConnection.sync();
            bookRepository.findAll().forEach(book -> {
                if (book.getAuthors() != null) {
                    book.getAuthors().forEach(author -> {
                        Suggestion<String> suggestion = Suggestion.builder(author).score(1d).build();
                        commands.sugadd(autoCompleteKey, suggestion);
                    });
                }
            });

            log.info(">>>> Created Author Name Suggestions...");
        }
    }
}

    Let’s break down the logic of the CreateAuthorNameSuggestions CommandLineRunner:

    • First, we guarantee a single execution by checking for the existence of the key for the auto-complete dictionary.
• Then, using the BookRepository, we loop over all books.
• For each author of a book, we add a suggestion to the dictionary.

    To use the auto-suggestion feature in the controller, we can add a new method:

@Value("${app.autoCompleteKey}")
private String autoCompleteKey;

@GetMapping("/authors")
public List<Suggestion<String>> authorAutoComplete(@RequestParam(name = "q") String query) {
    RediSearchCommands<String, String> commands = searchConnection.sync();
    SuggetOptions options = SuggetOptions.builder().max(20L).build();
    return commands.sugget(autoCompleteKey, query, options);
}

    With imports:

    import com.redislabs.lettusearch.Suggestion;
    import com.redislabs.lettusearch.SuggetOptions;

    In the authorAutoComplete method, we use the FT.SUGGET command (via the sugget method from the RediSearchCommands object) and build a query using a SuggetOptions configuration. In the example above, we set the maximum number of results to 20. We can use curl to craft a request to our new endpoint. In this example, I’m passing “brian s” as the query:

    curl --location --request GET 'http://localhost:8080/api/books/authors/?q=brian%20s'

    This results in a response with 2 JSON objects:

    [
    {
    "string": "Brian Steele",
    "score": null,
    "payload": null
    },
    {
    "string": "Brian Sam-Bodden",
    "score": null,
    "payload": null
    }
    ]

    If we add one more letter to our query to make it “brian sa”:

    curl --location --request GET 'http://localhost:8080/api/books/authors/?q=brian%20sa'

    We get the expected narrowing of the suggestion set:

    [
    {
    "string": "Brian Sam-Bodden",
    "score": null,
    "payload": null
    }
    ]
Recommendations with RedisGraph

To find books that are frequently bought together given an ISBN, we use the query:

MATCH (u:User)-[:PURCHASED]->(b1:Book {id: '%s'})
MATCH (b1)<-[:PURCHASED]-(u)-[:PURCHASED]->(b2:Book)
MATCH rated = (User)-[:RATED]-(b2)
WITH b1, b2, count(b2) as freq, head(relationships(rated)) as r
WHERE b1 <> b2
RETURN b2, freq, avg(r.rating)
ORDER BY freq, avg(r.rating) DESC

    Let's break it down:

• The first MATCH finds users that have bought the target book.
• The second MATCH finds other books purchased by those users.
• The third MATCH finds the ratings, if any, for those books.
• The WITH clause groups the values gathered so far, counts the number of purchases of the collected books, and grabs the metadata from the RATED relationships.
• The RETURN line returns the collected books, with the number of purchases and their average star rating.

    To implement the above query in our service add the following method:

public Set<String> getFrequentlyBoughtTogether(String isbn) {
    Set<String> recommendations = new HashSet<String>();

    String query = "MATCH (u:User)-[:PURCHASED]->(b1:Book {id: '%s'}) " //
            + "MATCH (b1)<-[:PURCHASED]-(u)-[:PURCHASED]->(b2:Book) " //
            + "MATCH rated = (User)-[:RATED]-(b2) " //
            + "WITH b1, b2, count(b2) as freq, head(relationships(rated)) as r " //
            + "WHERE b1 <> b2 " //
            + "RETURN b2, freq, avg(r.rating) " //
            + "ORDER BY freq, avg(r.rating) DESC";

    ResultSet resultSet = graph.query(graphId, String.format(query, isbn));
    while (resultSet.hasNext()) {
        Record record = resultSet.next();
        Node book = record.getValue("b2");
        recommendations.add(book.getProperty("id").getValue().toString());
    }
    return recommendations;
}

    The Recommendations Controller

    To serve our recommendations we will expose the recommendation service using the RecommendationController. Create the src/main/java/com/redislabs/edu/redi2read/controllers/RecommendationController.java file and add the contents as follows:

package com.redislabs.edu.redi2read.controllers;

import java.util.Set;

import com.redislabs.edu.redi2read.services.RecommendationService;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

@RestController
@RequestMapping("/api/recommendations")
public class RecommendationController {

    @Autowired
    private RecommendationService recommendationService;

    @GetMapping("/user/{userId}")
    public Set<String> userRecommendations(@PathVariable("userId") String userId) {
        return recommendationService.getBookRecommendationsFromCommonPurchasesForUser(userId);
    }

    @GetMapping("/isbn/{isbn}/pairings")
    public Set<String> frequentPairings(@PathVariable("isbn") String isbn) {
        return recommendationService.getFrequentlyBoughtTogether(isbn);
    }
}

    You can invoke the recommendation service with a curl request like:

    curl --location --request GET 'http://localhost:8080/api/recommendations/user/55214615117483454'

    or:

    curl --location --request GET 'http://localhost:8080/api/recommendations/isbn/1789610222/pairings'
Caching REST Services with Redis

In the case of a cache hit, the cached value is returned. Otherwise, in the case of a miss, the search method's return value is stored in the cache, allowing the method to execute as if there were no cache at all. If we try the request http://localhost:8080/api/books/search?q=java:

    curl --location --request GET 'http://localhost:8080/api/books/search?q=java'

    On the first request we get a 28 ms response time:

    PostMan Request 2

    Subsequent responses return in the range of 8 ms to 10 ms consistently:

    PostMan Request 1

How to Implement Fixed Window Rate Limiting using Redis

This basic recipe, using Redis Strings, a minute-size window, and a quota of 20 requests, is outlined on the Redis Blog. I'll summarize it here before we jump into our Spring Reactive implementation:

1. GET [user-api-key]:[current minute number], for example GET "u123:45"
2. If the result from step 1 is less than 20 (or the key is not found), go to step 4; otherwise, continue to step 3.
3. Reject the request.
4. In an atomic way (using MULTI and EXEC), increment the key and set the expiry to 59 seconds into the future:

MULTI
INCR [user-api-key]:[current minute number]
EXPIRE [user-api-key]:[current minute number] 59
EXEC

5. Fulfill the request.
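As an illustrative redis-cli sketch, assuming a hypothetical key u123:45 that currently holds 3 requests, the check and the atomic increment-and-expire look like this:

127.0.0.1:6379> GET u123:45
"3"
127.0.0.1:6379> MULTI
OK
127.0.0.1:6379> INCR u123:45
QUEUED
127.0.0.1:6379> EXPIRE u123:45 59
QUEUED
127.0.0.1:6379> EXEC
1) (integer) 4
2) (integer) 1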

OK, now that we know the basic recipe, let's implement it in Spring.

Atomicity with Gears

The RedisGears function is triggered with the key, the quota, and the TTL seconds in the future.

As we've done previously, if the function returns false we let the request through; otherwise we return an HTTP 429:

@Override
public Mono<ServerResponse> filter(ServerRequest request, HandlerFunction<ServerResponse> next) {
    int currentMinute = LocalTime.now().getMinute();
    String key = String.format("rl_%s:%s", requestAddress(request.remoteAddress()), currentMinute);

    RedisGearsCommands<String, String> gears = connection.sync();

    List<Object> results = gears.trigger("RateLimiter", key, Long.toString(maxRequestPerMinute), "59");
    if (!results.isEmpty() && !Boolean.parseBoolean((String) results.get(0))) {
        return next.handle(request);
    } else {
        return ServerResponse.status(TOO_MANY_REQUESTS).build();
    }
}

    Testing with curl

Once again, we use a curl loop to test the limiter:

    for n in {1..22}; do echo $(curl -s -w " :: HTTP %{http_code}, %{size_download} bytes, %{time_total} s" -X GET http://localhost:8080/api/ping); sleep 0.5; done

    You should see the 21st request being rejected:

    for n in {1..22}; do echo $(curl -s -w " :: HTTP %{http_code}, %{size_download} bytes, %{time_total} s" -X GET http://localhost:8080/api/ping); sleep 0.5; done
    PONG :: HTTP 200, 4 bytes, 0.064786 s
    PONG :: HTTP 200, 4 bytes, 0.009926 s
    PONG :: HTTP 200, 4 bytes, 0.009546 s
    PONG :: HTTP 200, 4 bytes, 0.010189 s
    PONG :: HTTP 200, 4 bytes, 0.009399 s
    PONG :: HTTP 200, 4 bytes, 0.009210 s
    PONG :: HTTP 200, 4 bytes, 0.008333 s
    PONG :: HTTP 200, 4 bytes, 0.008009 s
    PONG :: HTTP 200, 4 bytes, 0.008919 s
    PONG :: HTTP 200, 4 bytes, 0.009271 s
    PONG :: HTTP 200, 4 bytes, 0.007515 s
    PONG :: HTTP 200, 4 bytes, 0.007057 s
    PONG :: HTTP 200, 4 bytes, 0.008373 s
    PONG :: HTTP 200, 4 bytes, 0.007573 s
    PONG :: HTTP 200, 4 bytes, 0.008209 s
    PONG :: HTTP 200, 4 bytes, 0.009080 s
    PONG :: HTTP 200, 4 bytes, 0.007595 s
    PONG :: HTTP 200, 4 bytes, 0.007955 s
    PONG :: HTTP 200, 4 bytes, 0.007693 s
    PONG :: HTTP 200, 4 bytes, 0.008743 s
    :: HTTP 429, 0 bytes, 0.007226 s
    :: HTTP 429, 0 bytes, 0.007388 s

If we run Redis in monitor mode, we should see the calls to RG.TRIGGER, and under each allowed request the calls to GET, INCR, and EXPIRE:

    1631249244.006212 [0 172.17.0.1:56036] "RG.TRIGGER" "RateLimiter" "rl_localhost:47" "20" "59"
    1631249244.006995 [0 ?:0] "GET" "rl_localhost:47"
    1631249244.007182 [0 ?:0] "INCR" "rl_localhost:47"
    1631249244.007269 [0 ?:0] "EXPIRE" "rl_localhost:47" "59"

And for a rate-limited request you should see only the call to GET:

    1631249244.538478 [0 172.17.0.1:56036] "RG.TRIGGER" "RateLimiter" "rl_localhost:47" "20" "59"
    1631249244.538809 [0 ?:0] "GET" "rl_localhost:47"

    The complete code for this implementation is under the branch with_gears.

Atomicity with Lua

We add a property to hold the request quota:

    @Value("${MAX_REQUESTS_PER_MINUTE}")
    Long maxRequestPerMinute;

In our application.properties we'll set it to a max of 20 requests per minute:

    MAX_REQUESTS_PER_MINUTE=20

    To invoke the filter we use the newly modified constructor, passing the template, the script, and the maxRequestPerMinute value:

@Bean
RouterFunction<ServerResponse> routes() {
    return route() //
            .GET("/api/ping", r -> ok() //
                    .contentType(TEXT_PLAIN) //
                    .body(BodyInserters.fromValue("PONG")) //
            ).filter(new RateLimiterHandlerFilterFunction(redisTemplate, script(), maxRequestPerMinute)).build();
}

    Testing with curl

    Using our trusty curl loop:

    for n in {1..22}; do echo $(curl -s -w " :: HTTP %{http_code}, %{size_download} bytes, %{time_total} s" -X GET http://localhost:8080/api/ping); sleep 0.5; done

    You should see the 21st request being rejected:

    for n in {1..22}; do echo $(curl -s -w " :: HTTP %{http_code}, %{size_download} bytes, %{time_total} s" -X GET http://localhost:8080/api/ping); sleep 0.5; done
    PONG :: HTTP 200, 4 bytes, 0.173759 s
    PONG :: HTTP 200, 4 bytes, 0.008903 s
    PONG :: HTTP 200, 4 bytes, 0.008796 s
    PONG :: HTTP 200, 4 bytes, 0.009625 s
    PONG :: HTTP 200, 4 bytes, 0.007604 s
    PONG :: HTTP 200, 4 bytes, 0.008052 s
    PONG :: HTTP 200, 4 bytes, 0.011364 s
    PONG :: HTTP 200, 4 bytes, 0.012158 s
    PONG :: HTTP 200, 4 bytes, 0.010415 s
    PONG :: HTTP 200, 4 bytes, 0.010373 s
    PONG :: HTTP 200, 4 bytes, 0.010009 s
    PONG :: HTTP 200, 4 bytes, 0.006587 s
    PONG :: HTTP 200, 4 bytes, 0.006807 s
    PONG :: HTTP 200, 4 bytes, 0.006970 s
    PONG :: HTTP 200, 4 bytes, 0.007948 s
    PONG :: HTTP 200, 4 bytes, 0.007949 s
    PONG :: HTTP 200, 4 bytes, 0.006606 s
    PONG :: HTTP 200, 4 bytes, 0.006336 s
    PONG :: HTTP 200, 4 bytes, 0.007855 s
    PONG :: HTTP 200, 4 bytes, 0.006515 s
    :: HTTP 429, 0 bytes, 0.006633 s
    :: HTTP 429, 0 bytes, 0.008264 s

    If we run Redis in monitor mode, we should see the Lua calls to EVALSHA, followed by the call to GET for a rejected request, and the same plus calls to INCR and EXPIRE for an allowed request:

    1630342834.878972 [0 172.17.0.1:65008] "EVALSHA" "16832548450a4b1c5e23ffab55bddefe972fecd2" "1" "rl_localhost:0" "20" "59"
    1630342834.879044 [0 lua] "GET" "rl_localhost:0"
    1630342834.879091 [0 lua] "INCR" "rl_localhost:0"
    1630342834.879141 [0 lua] "EXPIRE" "rl_localhost:0" "59"
    1630342835.401937 [0 172.17.0.1:65008] "EVALSHA" "16832548450a4b1c5e23ffab55bddefe972fecd2" "1" "rl_localhost:0" "20" "59"
    1630342835.402009 [0 lua] "GET" "rl_localhost:0"

    The complete code for this implementation is under the branch with_lua.

Reactive Implementation

The curl flags used are as follows: -s silences curl (it hides the progress bar and errors), and -w sets the write-out options, in which we can pass a string with interpolated variables. We sleep half a second between cycles.

    for n in {1..22}; do echo $(curl -s -w " :: HTTP %{http_code}, %{size_download} bytes, %{time_total} s" -X GET http://localhost:8080/api/ping); sleep 0.5; done
    PONG :: HTTP 200, 4 bytes, 0.393156 s
    PONG :: HTTP 200, 4 bytes, 0.019530 s
    PONG :: HTTP 200, 4 bytes, 0.023677 s
    PONG :: HTTP 200, 4 bytes, 0.019922 s
    PONG :: HTTP 200, 4 bytes, 0.025573 s
    PONG :: HTTP 200, 4 bytes, 0.018916 s
    PONG :: HTTP 200, 4 bytes, 0.019548 s
    PONG :: HTTP 200, 4 bytes, 0.018335 s
    PONG :: HTTP 200, 4 bytes, 0.010105 s
    PONG :: HTTP 200, 4 bytes, 0.008416 s
    PONG :: HTTP 200, 4 bytes, 0.009829 s
    PONG :: HTTP 200, 4 bytes, 0.011766 s
    PONG :: HTTP 200, 4 bytes, 0.010809 s
    PONG :: HTTP 200, 4 bytes, 0.015483 s
    PONG :: HTTP 200, 4 bytes, 0.009732 s
    PONG :: HTTP 200, 4 bytes, 0.009970 s
    PONG :: HTTP 200, 4 bytes, 0.008696 s
    PONG :: HTTP 200, 4 bytes, 0.009176 s
    PONG :: HTTP 200, 4 bytes, 0.009678 s
    PONG :: HTTP 200, 4 bytes, 0.012497 s
    :: HTTP 429, 0 bytes, 0.010071 s
    :: HTTP 429, 0 bytes, 0.006625 s

    If we run Redis in monitor mode, we should see the call to GET for a rejected request, and the same plus calls to INCR and EXPIRE for an allowed request:

    1630366639.188290 [0 172.17.0.1:65016] "GET" "rl_localhost:37"
    1630366639.200956 [0 172.17.0.1:65016] "INCR" "rl_localhost:37"
    1630366639.202372 [0 172.17.0.1:65016] "EXPIRE" "rl_localhost:37" "59"
    ...
    1630366649.891110 [0 172.17.0.1:65016] "GET" "rl_localhost:37"
    1630366650.417131 [0 172.17.0.1:65016] "GET" "rl_localhost:37"

    You can find this example on the main branch at https://github.com/redis-developer/fixed-window-rate-limiter


    How to implement Rate Limiting in Spring Applications using Redis


    Profile picture for Brian Sam-Bodden
    Author:
    Brian Sam-Bodden, Developer Advocate at Redis

    In this series of mini-tutorials we'll explore several approaches to implement rate limiting in Spring applications using Redis. We’ll start with the most basic of Redis recipes and we’ll slowly increase the complexity of our implementations.


    What is Rate Limiting?

    Rate Limiting entails techniques to regulate the number of requests a particular client can make against a networked service. It caps the total number and/or the frequency of requests.

    Why do we need Rate Limiting?

There are many reasons why you would want to add a rate limiter to your APIs. Whether it is to prevent intentional or accidental API abuse, a rate limiter can stop the invaders at the gate. Let’s think of some scenarios where a rate limiter could save your bacon:

    • If you ever worked at an API-based startup, you know that to get anywhere you need a “free” tier. A free tier will get potential customers to try your service and spread the word. But without limiting the free tier users you could risk losing the few paid customers your startup has.
    • Programmatic integrations with your API could have bugs. Sometimes resource starvation is not caused by a malicious attack. These FFDoS (Friendly-Fire Denial of Service) attacks happen more often than you can imagine.
• Finally, there are malicious players recruiting bots on a daily basis to make API providers’ lives miserable. Being able to detect and curtail those attacks before they impact your users could mean the life of your business.

    Rate limiting is typically implemented on the server-side but if you have control of the clients you can also preempt certain types of access at that point. It relies on three particular pieces of information:

    1. Who’s making the request: Identifying the source of the attack or abuse is the most important part of the equation. If the offending requests cannot be grouped and associated with a single entity you’ll be fighting in the dark.
2. What’s the cost of the request: Not all requests are created equal. For example, a request that’s bound to a single account’s data can likely only cause localized havoc, while a request that spans multiple accounts and/or broad spans of time (like multiple years) is much more expensive.
3. What is their allotted quota: How many total requests and/or what rate of requests is permitted for the user. For example, in the case of the "free tier" you might have a smaller allotment/bucket of requests they can make, or you might reduce it during certain peak times.

    Why Redis for Rate Limiting?

    Redis is especially positioned as a platform to implement rate limiting for several reasons:

• Speed: The checks and calculations required by a rate limiting implementation add to the total request-response time of your API; you want those operations to happen as fast as possible.
    • Centralization and distribution: Redis can seamlessly scale your single server/instance setup to hundreds of nodes without sacrificing performance or reliability.
• The Right Abstractions: Redis provides optimized data structures to support several of the most common rate limiter implementations, and with its built-in TTL (time-to-live) controls it allows for efficient management of memory. Counting things is a built-in feature in Redis and one of the many areas where Redis shines above the competition.

    Now, let’s get started with our first implementation; the simple “Fixed Window” implementation.

    Further Reading


    Rate Limiting with Spring and Redis

The following links provide you with the available options to implement rate limiting using Spring and Redis

    Rate Limiting with Spring and Redis
    Cap the maximum number of requests in a fixed window of time
    Improving atomicity and performance with Lua
    Improving atomicity and performance with Triggers and Functions
Enhanced Mapping of Java Objects to Hashes

We can add a query declaration to the repository interface like:

    List<User> findByFirstNameAndLastName(String firstName, String lastName);

In this case, the method name findByFirstNameAndLastName is parsed, and the And keyword determines that the method expects two parameters: firstName and lastName.

    To test it we could add the following to our controller:

    @GetMapping("/q")
    public List<User> findByName(@RequestParam String firstName, @RequestParam String lastName) {
    return userRepository.findByFirstNameAndLastName(firstName, lastName);
    }

Using curl to test it:

    curl --location --request GET 'http://localhost:8080/api/users/q?firstName=Brad&lastName=Wilk'
    [{"id":"01FNTE5KWCZ5H438JGB4AZWE85","firstName":"Brad","middleName":null,"lastName":"Wilk","email":"brad@ratm.com"}]

    Formatting the resulting JSON we can see the record for Brad Wilk is returned as the only element of the JSON Array result:

    [
    {
    "id": "01FNTE5KWCZ5H438JGB4AZWE85",
    "firstName": "Brad",
    "middleName": null,
    "lastName": "Wilk",
    "email": "brad@ratm.com"
    }
    ]

    Back on the Redis CLI monitor we can see the query generated by our repository method:

    1638343589.454213 [0 172.19.0.1:63406] "FT.SEARCH" "UserIdx" "@firstName:{Brad} @lastName:{Wilk} "

Redis OM Spring extends Spring Data Redis with search capabilities that rival the flexibility of JPA queries by using Redis' native search and query engine.

Mapping Java Objects to JSON

Repository query methods support logic and numerical operators like between, startingWith, greaterThan, lessThanOrEquals, and many more.

    Below are some more examples of what's possible:

    // find by numeric property
    Iterable<Company> findByNumberOfEmployees(int noe);

    // find by numeric property range
    Iterable<Company> findByNumberOfEmployeesBetween(int noeGT, int noeLT);

    // starting with/ending with
    Iterable<Company> findByNameStartingWith(String prefix);

    What's Next

This was but a brief tour of the capabilities of Redis OM Spring (ROMS). In the next installment we'll cover how ROMS extends Spring Data Redis' Redis Hash mapping to make it even better.


    Redis OM - Spring


    Profile picture for Brian Sam-Bodden
    Author:
    Brian Sam-Bodden, Developer Advocate at Redis

    Introduction

    The aim of the Redis OM family of projects is to provide high-level abstractions idiomatically implemented for your language/platform of choice. We currently cater to the Node.js, Python, .NET and Spring communities.

The Spring Framework is the leading full-stack Java/JEE application framework, and the goal of Redis OM Spring (ROMS) is to enable developers to easily add the power of Redis to their Spring Boot applications.

    Redis OM Spring provides powerful repository and custom object-mapping abstractions built on top of the amazing Spring Data Redis (SDR) framework.

The current preview release provides all of SDR's capabilities, plus:

    • A @Document annotation to map Spring Data models to Redis JSON documents
    • Enhancements to SDR's @RedisHash via @EnableRedisEnhancedRepositories to:
      • use Redis' native search engine (Redis Search) for secondary indexing
  • use ULID identifiers for @Id annotated fields
    • RedisDocumentRepository with automatic implementation of Repository interfaces for complex querying capabilities using @EnableRedisDocumentRepositories
    • Declarative Search Indices via @Indexable
    • Full-text Search Indices via @Searchable
• @Bloom annotation to determine very quickly, and with a high degree of certainty, whether a value is in a collection

    Tutorials to get you started

    Mapping Java Objects to JSON
    Enhanced Mapping of Java Objects to Hashes
    - + \ No newline at end of file diff --git a/develop/node/gettingstarted/index.html b/develop/node/gettingstarted/index.html index 2ec43d117a..32ad56adbb 100644 --- a/develop/node/gettingstarted/index.html +++ b/develop/node/gettingstarted/index.html @@ -4,7 +4,7 @@ Getting Started with Node and Redis | The Home of Redis Developers - + @@ -19,7 +19,7 @@ This application calls the GitHub API and caches the results into Redis.

Redis Rate-Limiting This is a very simple app that demonstrates rate limiting using Redis.

    Notifications with WebSocket, Vue & Redis This project allows you to push notifications in a Vue application from a Redis PUBLISH using WebSockets.

    Technical Articles & Videos

    Redis Rapid Tips: ioredis (YouTube)

    Mapping Objects between Node and Redis (YouTube)


    Redis University

    Redis for JavaScript Developers

    Build full-fledged Redis applications with Node.js and Express.

    - + \ No newline at end of file diff --git a/develop/node/index.html b/develop/node/index.html index 94990ed78d..0042824347 100644 --- a/develop/node/index.html +++ b/develop/node/index.html @@ -4,7 +4,7 @@ NodeJS and Redis | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    NodeJS and Redis


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

The following links provide you with the available options for developing your application using NodeJS and Redis:

    Node.js and Redis
    Node.js Crash Course
    Build a simple service with Redis OM for Node.js and Express
    - + \ No newline at end of file diff --git a/develop/node/node-crash-course/index.html b/develop/node/node-crash-course/index.html index 8cd3271f32..21409eb27b 100644 --- a/develop/node/node-crash-course/index.html +++ b/develop/node/node-crash-course/index.html @@ -4,7 +4,7 @@ The Node.js Crash Course | The Home of Redis Developers - + @@ -15,7 +15,7 @@ the ioredis client and Redis Stack.

    In this course, you'll learn about using Redis with Node.js through a blend of video and text-based training. You can also get hands-on with some optional workshop exercises where you'll add new functionality to an existing Node.js application.

    Welcome to the Node.js Redis Crash Course!
    Introducing Redis - the database that developers love!
    The ioredis client for Node.js.
    A visual tool for managing Redis.
    Introducing the sample application.
    Install, configure and run the sample application.
    Modeling domain objects with Redis Hashes.
    Extending the capabilities of Redis.
    Storing JSON documents in Redis.
    Indexing and querying Hashes with Redis as a Search and Query engine.
    Using Redis Streams to handle a data fire hose.
    How Redis helps your Node.js application to scale.
    Using Redis as a cache to speed up user experience.
    Horizontal scaling with Redis as a session store.
    Consumer groups - collaborative stream processing.
    A look at probabilistic data structures in Redis.
    Wrap up and next steps.
    - + \ No newline at end of file diff --git a/develop/node/nodecrashcourse/advancedstreams/index.html b/develop/node/nodecrashcourse/advancedstreams/index.html index 243db7c477..e20fec0c48 100644 --- a/develop/node/nodecrashcourse/advancedstreams/index.html +++ b/develop/node/nodecrashcourse/advancedstreams/index.html @@ -4,7 +4,7 @@ Advanced Streams: Parallel Processing Checkins with Consumer Groups | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Advanced Streams: Parallel Processing Checkins with Consumer Groups


    Profile picture for Simon Prickett
    Author:
    Simon Prickett, Principal Developer Advocate at Redis

    As our application grows in popularity and our user base increases, we're receiving more and more checkins. Recall that checkins are added to a Redis Stream by the Checkin Receiver, and read from that stream by the Checkin Processor. The Stream acts as a buffer between these two components:

    Stream Overview

    Unfortunately, our single Checkin Processor is struggling to keep up with the volume of new checkins. This means that we're seeing longer and longer lag times between a checkin arriving in the Stream and its values being reflected in our user and location Hashes.

    And, we can't run more than one instance of the Checkin Processor, as each instance will consume the whole Stream. What we need is a way for multiple instances of the same consumer code to collaboratively process entries from a Stream.

    Redis Streams offers consumer groups as a solution for this. We can think of a consumer group as a single logical consumer that reads the entire Stream, spreading the work out between individual consumers in the group:

    Stream Overview

    Redis tracks which messages have been delivered to which consumers in the group, ensuring that each consumer receives its own unique subset of the Stream to process. This allows for parallel processing of the Stream by multiple consumer processes. As you'll see in the video, this requires us to rethink our processing logic to allow Stream entries to be processed out of order, and to avoid race conditions when updating user and location Hashes. We'll use the Lua interpreter built into Redis to help here.
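
To make this concrete, here's a minimal sketch of a consumer group read/acknowledge loop using ioredis. The stream, group, and consumer names follow this course's conventions, but this is only an illustration; the real implementation is in src/checkingroupprocessor.js in the repo:

const Redis = require('ioredis');

const redisClient = new Redis();

const processStream = async (consumerName) => {
  while (true) {
    // '>' asks only for entries never delivered to any consumer in this group.
    const response = await redisClient.xreadgroup(
      'GROUP', 'checkinConsumers', consumerName,
      'COUNT', 1, 'BLOCK', 5000,
      'STREAMS', 'ncc:checkins', '>',
    );

    if (response) {
      // response is [[streamKey, [[entryId, fieldsArray], ...]]].
      const [entryId, fields] = response[0][1][0];
      console.log(`${consumerName}: processing ${entryId}`, fields);

      // Acknowledge the entry so it leaves this consumer's pending list.
      await redisClient.xack('ncc:checkins', 'checkinConsumers', entryId);
    }
  }
};

processStream('consumer1');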

    Hands-on Exercise

    In this exercise, you'll run multiple concurrent instances of the Checkin Group Processor so that you can see how they work together to collaboratively process the Stream.

    If you're still running the Checkin Processor service, stop it with Ctrl-C.

    Next, open up two terminal windows. cd to the node-js-crash-course folder that you cloned the GitHub repo into in both windows.

    In one terminal, start an instance of the Checkin Group Processor that we'll call consumer1:

    $ npm run checkingroupprocessor consumer1

    > js-crash-course@0.0.1 checkingroupprocessor
    > node ./src/checkingroupprocessor.js "consumer1"

    info: consumer1: Starting up.
    info: consumer1: Processing checkin 1609602085397-0.
    debug: consumer1: Processing 1609602085397-0.
    debug: consumer1: Updating user ncc:users:789 and location ncc:locations:171.
    info: consumer1: Acknowledged processing of checkin 1609602085397-0.
    info: consumer1: Pausing to simulate work.
    info: consumer1: Processing checkin 1609604227545-0.
    debug: consumer1: Processing 1609604227545-0.
    debug: consumer1: Updating user ncc:users:752 and location ncc:locations:100.
    info: consumer1: Acknowledged processing of checkin 1609604227545-0.
    info: consumer1: Pausing to simulate work.
    info: consumer1: Processing checkin 1609605397408-0.
    debug: consumer1: Processing 1609605397408-0.
    debug: consumer1: Updating user ncc:users:180 and location ncc:locations:13.
    info: consumer1: Acknowledged processing of checkin 1609605397408-0.
    info: consumer1: Pausing to simulate work.
    info: consumer1: Processing checkin 1609605876514-0.
    ...

    In the second terminal, start another instance of the Checkin Group Processor, consumer2:

    $ npm run checkingroupprocessor consumer2

    > js-crash-course@0.0.1 checkingroupprocessor
    > node ./src/checkingroupprocessor.js "consumer2"

    info: consumer2: Starting up.
    info: consumer2: Processing checkin 1609603711960-0.
    debug: consumer2: Processing 1609603711960-0.
    debug: consumer2: Updating user ncc:users:455 and location ncc:locations:181.
    info: consumer2: Acknowledged processing of checkin 1609603711960-0.
    info: consumer2: Pausing to simulate work.
    info: consumer2: Processing checkin 1609604778689-0.
    debug: consumer2: Processing 1609604778689-0.
    debug: consumer2: Updating user ncc:users:102 and location ncc:locations:144.
    info: consumer2: Acknowledged processing of checkin 1609604778689-0.
    info: consumer2: Pausing to simulate work.
    ...

    Look at the checkin IDs that each consumer processes. Note that they don't receive the same checkins. The Redis server gives each consumer in a group its own logical view of the Stream, each processing a subset of entries. This speeds up checkin processing as now we can have more than one consumer running at the same time.

    Let’s take a look at some of the information Redis is tracking about our consumer group. Go ahead and stop both consumer processes by pressing Ctrl-C.

    If you're using RedisInsight, open up the "Streams" browser, click the ncc:checkins key, and then select the "Consumer Groups" tab. You should see something like this:

    RedisInsight Consumer Groups

    This shows the number of consumers that are in the group, how many pending messages each has (a pending message is one that has been read by a consumer but not yet acknowledged with XACK), and the consumer's idle time since it last read from the Stream.

    Click on "checkinConsumers" in the Consumer Group table to see a breakdown of pending messages and idle time for each consumer:

    RedisInsight Consumer Groups Detail

    In a real-world system, you could use this information to detect consumers that have encountered a problem processing entries. Redis Streams provides commands to reassign messages that a consumer has read but not acknowledged, allowing you to build consumer recovery strategies that re-allocate those messages to healthy consumer instances in the same group.
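
For example, here's a minimal sketch of such a recovery strategy using XPENDING and XCLAIM with ioredis; the 60-second idle threshold is an assumption chosen purely for illustration:

// Sketch: claim entries that another consumer read but never acknowledged.
const recoverAbandonedEntries = async (redisClient, myConsumerName) => {
  // The extended form of XPENDING returns per-entry detail:
  // [entryId, owningConsumer, idleTimeMillis, deliveryCount].
  const pending = await redisClient.xpending(
    'ncc:checkins', 'checkinConsumers', '-', '+', 10,
  );

  for (const [entryId, owner, idleTime] of pending) {
    // Assume an entry idle for over 60 seconds belongs to a failed consumer.
    if (owner !== myConsumerName && idleTime > 60000) {
      // XCLAIM transfers ownership of the entry to this consumer.
      await redisClient.xclaim(
        'ncc:checkins', 'checkinConsumers', myConsumerName, 60000, entryId,
      );
      console.log(`Claimed ${entryId} from ${owner}.`);
    }
  }
};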

    If you're using redis-cli rather than RedisInsight, you can see the same information using the XINFO and XPENDING commands:

    127.0.0.1:6379> xinfo groups ncc:checkins
    1) 1) "name"
    2) "checkinConsumers"
    3) "consumers"
    4) (integer) 2
    5) "pending"
    6) (integer) 0
    7) "last-delivered-id"
    8) "1609605876514-0"
    127.0.0.1:6379> xpending ncc:checkins checkinConsumers
    1) (integer) 0
    127.0.0.1:6379> xinfo consumers ncc:checkins checkinConsumers
    1) 1) "name"
    2) "consumer1"
    3) "pending"
    4) (integer) 0
    5) "idle"
    6) (integer) 2262454
    2) 1) "name"
    2) "consumer2"
    3) "pending"
    4) (integer) 0
    5) "idle"
    6) (integer) 2266244

    External Resources

    - + \ No newline at end of file diff --git a/develop/node/nodecrashcourse/caching/index.html b/develop/node/nodecrashcourse/caching/index.html index 123d4b81ba..0e2c458117 100644 --- a/develop/node/nodecrashcourse/caching/index.html +++ b/develop/node/nodecrashcourse/caching/index.html @@ -4,7 +4,7 @@ Caching with Redis and Express Middleware | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Caching with Redis and Express Middleware


    Profile picture for Simon Prickett
    Author:
    Simon Prickett, Principal Developer Advocate at Redis

    We want to provide our users with up to date weather for each of our locations… so we've partnered with a company that provides a weather API.

    Our use of this API is metered and rate limited, so ideally we don't want to keep making the same requests to it over and over again. This is wasteful, could cost us money, and will slow down responses to our users.

    Redis can be used as a cache to help here. Keys in Redis can be given an expiry time, after which Redis will delete them. We'll use this capability to cache the results of weather API calls as Redis Strings, keeping them for an hour to achieve a balance between users seeing the absolute latest weather report for a location and the load we're placing on the API provider's servers.

    We'll use an extra Express middleware function to check if we've got the weather for a location in the Redis cache, and only go get it from the API provider if necessary.
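
Here's a minimal sketch of what such a middleware function might look like, assuming redisClient is an ioredis connection and using this course's weather key naming; the real implementation in the repo may differ:

// Sketch: Express middleware that serves a cached weather report if present.
const cacheWeatherMiddleware = async (req, res, next) => {
  const cacheKey = `ncc:weather:${req.params.locationId}`;
  const cachedWeather = await redisClient.get(cacheKey);

  if (cachedWeather) {
    // Cache hit: return the stored JSON without calling the weather API.
    res.status(200).json(JSON.parse(cachedWeather));
  } else {
    // Cache miss: let the route handler call the API and populate the cache:
    // await redisClient.set(cacheKey, JSON.stringify(weather), 'EX', 3600);
    next();
  }
};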

    Hands-on Exercise

    In this exercise, you'll use Postman to see how caching with Redis makes an API call faster while also saving us from incurring costs associated with using a third-party service.

    You'll be using the "/location/:locationId/weather" route for this exercise. The code for this route takes a location ID, retrieves that location's latitude and longitude from its Redis Hash, then calls the OpenWeather API with those coordinates to retrieve weather data as a JSON document.

    This document is returned to the user and cached in Redis for an hour. Subsequent requests for the same location's weather within the hour are handled by the middleware function that sits in front of this route's logic. It returns the cached value without making a request to the OpenWeather servers.

    To use OpenWeather's API, you'll first need to sign up for a free API key on their website.

    Once you have your API key, stop the API server component (Ctrl-C), and set an environment variable containing your key as follows:

    $ export WEATHER_API_KEY=my_api_key

    Then start the server:

    $ npm run dev

    Now start Postman, and click the + button to create a new request:

    Postman Plus Button

    Set the URL to http://localhost:8081/api/location/99/weather and make sure you have a GET request selected from the dropdown of available HTTP verbs:

    Postman set URL and verb

    Click "Send", and you should see the weather report JSON for location 99 appear in the "Response" panel. Make a note of the overall response time that Postman reports for this request (509 milliseconds here):

    Origin Request

    Take a look at the output from the API server in your terminal window, you should see that no value for location 99 was found in the cache, so the data was requested from OpenWeather and then added to the cache in Redis:

    debug: Cache miss for location 99 weather.

    Click "Send" in Postman again to repeat the request... This time the response will be served from Redis and will be noticeably faster. No call to the OpenWeather API was made. Note the difference in response times when the result comes from cache (just 6 milliseconds here!):

    Cached Request

    Checking the output from the API server's terminal window shows that this request was served from cache:

    debug: Cache hit for location 99 weather.

    Finally, take a look at the cached data in Redis. Use RedisInsight or redis-cli to take a look at the key ncc:weather:99. The TTL command shows the number of seconds remaining before Redis deletes this key from the cache:

    $ redis-cli
    127.0.0.1:6379> GET ncc:weather:99
    127.0.0.1:6379> TTL ncc:weather:99

    If you're using RedisInsight, you can see the remaining TTL (in seconds) in the browser view:

    TTL in Redis Insight

    The key ncc:weather:99 will be deleted an hour after it was originally written to Redis, causing the next request after that deletion for location 99's weather to be a cache miss. If you want to speed up this process, delete the key ncc:weather:99 using the trash can icon in RedisInsight, or the DEL command in redis-cli:

    127.0.0.1:6379> DEL ncc:weather:99

    Then try your request in Postman again and see what happens.

    External Resources

    If you'd like to learn more about caching API responses in a Node.js application with Redis, check out Justin's excellent video:

    - + \ No newline at end of file diff --git a/develop/node/nodecrashcourse/checkinswithstreams/index.html b/develop/node/nodecrashcourse/checkinswithstreams/index.html index 1057d0d62b..4a33589bfa 100644 --- a/develop/node/nodecrashcourse/checkinswithstreams/index.html +++ b/develop/node/nodecrashcourse/checkinswithstreams/index.html @@ -4,7 +4,7 @@ Processing Checkins with Redis Streams | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Processing Checkins with Redis Streams


    Profile picture for Simon Prickett
    Author:
    Simon Prickett, Principal Developer Advocate at Redis

    The most common action that users perform with our system is checking in at a location. This part of the system needs to quickly capture checkins and scale independently of other components.

    We decided to build a separate Express application just to receive checkin POST requests from users. This allows us to scale it separately from other API endpoints that deal with GET requests. To make our Checkin Receiver as fast as possible, we decided to do the actual work of processing checkins in a separate service. The Checkin Processor service reads checkins from the stream and updates the user and location Hashes in Redis.

    Checkins are transient data in our system - as long as we process them all, we don't need to keep them around forever. It also makes sense to store them in the order that they arrive in the system.

    Using a Redis Stream to store our checkin data is a natural fit for this use case. A Stream acts as a buffer between producer and consumer components. With Redis Streams, each entry in the stream is given a timestamp ID and the Stream is ordered by these IDs.

    In our application, the Checkin Receiver Service is the producer and the Checkin Processor the consumer. We can represent this in a diagram like so:

    Streams Overview

    Using a Stream allows these components to operate at different speeds with no knowledge of each other. The Checkin Receiver simply adds a new entry to the Stream for each checkin it receives from a user, and the Checkin Processor reads the Stream and updates user and location Hashes at its own pace.

It's also possible to read a Redis Stream to find entries that were added in a specified time period between start and end IDs. As our IDs are timestamps, this means that we can request data that was added in a given timeframe. We use this capability in the API Server component, and in this module's coding exercise you'll get to extend this with new functionality.
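
As an illustration of that capability, here's a minimal sketch of a time-window query with ioredis; the helper function name is hypothetical, but the stream key is the one used throughout this course:

// Sketch: fetch checkins added during a given time window.
// Stream entry IDs begin with a millisecond timestamp, so plain
// millisecond values are valid start/end arguments to XRANGE.
const getCheckinsBetween = async (redisClient, startMillis, endMillis) => {
  const entries = await redisClient.xrange(
    'ncc:checkins', startMillis, endMillis,
  );
  // Each entry is [id, [field1, value1, field2, value2, ...]].
  return entries;
};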

    Hands-on exercise

    Take a moment to run the Checkin Processor component which reads checkins from the stream and updates user and location Hashes.

The sample data contains 5000 unprocessed checkins, which the Checkin Processor will consume. The Checkin Processor keeps track of how far it has gotten in the stream by storing the ID of the last processed checkin in Redis. This way, when it's stopped and restarted, it picks up from where it left off.
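
Conceptually, the pattern looks something like this sketch, assuming redisClient is an ioredis connection; the actual implementation lives in src/checkinprocessor.js in the repo:

// Sketch: resume reading a stream from the last processed entry ID.
const LAST_ID_KEY = 'ncc:checkinprocessor:lastid';

const processNextCheckin = async (redisClient) => {
  // Fall back to 0, the lowest possible ID, on the first ever run.
  const lastId = (await redisClient.get(LAST_ID_KEY)) || 0;

  // XREAD returns entries with IDs greater than the one supplied.
  const response = await redisClient.xread(
    'COUNT', 1, 'BLOCK', 5000, 'STREAMS', 'ncc:checkins', lastId,
  );

  if (response) {
    const [[, entries]] = response;
    const [entryId] = entries[0];
    // ... update the user and location Hashes here ...

    // Remember how far we got, so a restart picks up from here.
    await redisClient.set(LAST_ID_KEY, entryId);
  }
};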

    In a terminal window, cd to the node-js-crash-course folder that you cloned the GitHub repo to, and start the Checkin Processor:

    $ npm run checkinprocessor delay

    Adding delay introduces an artificial random processing time for each checkin. This slows the Checkin Processor down so that you can examine its output more easily. You should see it start up and begin processing checkins from the start of the stream at ID 0, which is the lowest possible stream entry ID:

    $ npm run checkinprocessor delay

    > js-crash-course@0.0.1 checkinprocessor
    > node ./src/checkinprocessor.js -- "delay"

    info: Reading stream from last ID 0.
    debug: Updating user ncc:users:789 and location ncc:locations:171.
    info: Processed checkin 1609602085397-0.
    debug: Updating user ncc:users:455 and location ncc:locations:181.
    info: Processed checkin 1609603711960-0.
    debug: Updating user ncc:users:752 and location ncc:locations:100.
    info: Processed checkin 1609604227545-0.
    debug: Updating user ncc:users:102 and location ncc:locations:144.
    info: Processed checkin 1609604778689-0.
    debug: Updating user ncc:users:180 and location ncc:locations:13.
    info: Processed checkin 1609605397408-0.
    ...

    Stop the Checkin Processor with Ctrl-C after it has processed a few checkins. Note the ID of the last checkin processed (this is 1609605397408-0 in the example above). Also note the user and location ID for the last checkin processed (user 180, location 13 in the example above).

    Verify that the Checkin Processor stored this ID in Redis so that it knows where to start from when it's restarted. Using redis-cli or RedisInsight, take a look at the contents of the key ncc:checkinprocessor:lastid:

    127.0.0.1:6379> get ncc:checkinprocessor:lastid
    "1609605397408-0"

    The value should match the last checkin ID that was processed.

    Finally, let's verify that the Checkin Processor updated the user's Hash with details from that checkin. Use RedisInsight or the HGETALL command in redis-cli to look at the hash whose key is ncc:users:<user-id>, replacing <user-id> with the ID of the user that you noted earlier.

    So for my example, let's look at user 180:

    127.0.0.1:6379> hgetall ncc:users:180
    1) "id"
    2) "180"
    3) "firstName"
    4) "Sophia"
    5) "lastName"
    6) "Marshall"
    7) "email"
    8) "sophia.marshall@example.com"
    9) "password"
    10) "$2b$05$DPSHjaW44H4fn9sudfz/5.f1WcuZMrA0OZIp0CALQf0MH8zH1SSda"
    11) "numCheckins"
    12) "2332"
    13) "lastCheckin"
    14) "1609605397408"
    15) "lastSeenAt"
    16) "13"

Verify that the value for lastCheckin is the timestamp from the last processed checkin's ID (1609605397408 in my case), and that the lastSeenAt value is the location ID from the last processed checkin (13 in my case).

    Coding exercise

    In this exercise, you'll implement a new route in the API Server component. This route will return only the most recent checkin from the checkins stream. You'll use the XREVRANGE command for this.

    First, make sure the API Server is running:

    $ npm run dev

    (remember that this starts the server with nodemon, so as you modify the code and save your changes it will automatically restart and run the new code).

    Open the node-js-crash-course folder with your IDE, and open the file src/routes/checkin_routes.js. Locate the function that handles the /checkins/latest route.

    Using the XREVRANGE documentation as a guide, modify the following line to invoke XREVRANGE so that it returns just the most recent checkin:

    const latestCheckin = await redisClient.xrevrange(checkinStreamKey, 'TODO');

    Remember: When using ioredis, each parameter to a Redis command needs to be passed as a separate value.
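
As an illustration of that calling convention (using a hypothetical stream key, not the exercise solution), every token of the command becomes its own argument:

// Inside an async function: XREVRANGE takes the end ID first ('+' for the
// newest entry), then the start ID ('-' for the oldest), then an optional COUNT.
const entries = await redisClient.xrevrange('mystream', '+', '-', 'COUNT', 5);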

    Test your code by visiting http://localhost:8081/checkins/latest - you should see a JSON representation of a checkin.

To make sure your code returns the latest checkin, you need to POST a checkin using Postman. Start the Checkin Receiver component in a new terminal window:

    $ npm run checkinreceiver

    Then use Postman to POST a checkin. In Postman, open a new request, configure it as shown, and press Send:

    Checkin Test with Postman

    Now when you refresh http://localhost:8081/checkins/latest in your browser, the values shown should match those that you supplied in Postman.

    External Resources

    In this video, Justin introduces Redis Streams with an example application that's very similar to the one we're building in this course:

    In our example application, we process stream entries in Node.js using the array representation that ioredis returns by default. In this video, I look at using advanced features of ioredis to make it return JavaScript objects instead:

    - + \ No newline at end of file diff --git a/develop/node/nodecrashcourse/coursewrapup/index.html b/develop/node/nodecrashcourse/coursewrapup/index.html index bccde15f3b..fcad1b3e76 100644 --- a/develop/node/nodecrashcourse/coursewrapup/index.html +++ b/develop/node/nodecrashcourse/coursewrapup/index.html @@ -4,7 +4,7 @@ Course Wrap Up | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Course Wrap Up


    Profile picture for Simon Prickett
    Author:
    Simon Prickett, Principal Developer Advocate at Redis

    Thanks for taking the time to complete this course. We hope that you found it useful and that you've learned a few things about using Redis as a data store and as a cache in your Node.js applications.

    Don't forget that the application source code is available on GitHub for you to browse and use in your own projects.

    Stay in touch! We'd love to hear from you and see what you build with Redis. Why not join us on Discord and subscribe to our YouTube channel where we publish new videos regularly.

    Finally, if you'd like to continue your learning journey, check out Redis University for free, self-paced online courses.

    - + \ No newline at end of file diff --git a/develop/node/nodecrashcourse/domainobjectswithhashes/index.html b/develop/node/nodecrashcourse/domainobjectswithhashes/index.html index 7a8b7af649..861ebd5ccf 100644 --- a/develop/node/nodecrashcourse/domainobjectswithhashes/index.html +++ b/develop/node/nodecrashcourse/domainobjectswithhashes/index.html @@ -4,7 +4,7 @@ Managing Domain Objects with Redis Hashes | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Managing Domain Objects with Redis Hashes


    Profile picture for Simon Prickett
    Author:
    Simon Prickett, Principal Developer Advocate at Redis

    In this module, you'll see how we're using Redis Hashes to model the user and location data in our application.

    Coding Exercise

    In your first coding exercise, you'll be adding a new route that takes a user's ID and returns their full name.

    Using your IDE, open the node-js-crash-course folder that you cloned the GitHub repository into. Open the file src/routes/user_routes.js and find the route /user/:userId/fullname which looks like this:

    // EXERCISE: Get user's full name.
    router.get(
    '/user/:userId/fullname',
    [param('userId').isInt({ min: 1 }), apiErrorReporter],
    async (req, res) => {
    const { userId } = req.params;
    /* eslint-disable no-unused-vars */
    const userKey = redis.getKeyName('users', userId);
    /* eslint-enable */

    // TODO: Get the firstName and lastName fields from the
    // user hash whose key is in userKey.
    // HINT: Check out the HMGET command...
    // https://redis.io/commands/hmget
    const [firstName, lastName] = ['TODO', 'TODO'];

    res.status(200).json({ fullName: `${firstName} ${lastName}` });
    },
    );

    In this exercise, you'll modify the code to return the user's full name by retrieving the firstName and lastName fields for the requested user from Redis.

    First, make sure your server is still running, if not start it with:

    $ npm run dev

    Next, browse to http://localhost:8081/api/user/5/fullname

    You should see:

    {
    "fullName": "TODO TODO"
    }

    Take a look at the documentation for the Redis HMGET command, which retrieves multiple named fields from a Redis Hash. You'll need to add code that calls the Redis client's hmget function, then place the values returned into the firstName and lastName variables. You should be able to retrieve both values using a single call to hmget. For guidance on how to invoke Redis commands, check out the code for the /user/:userId route which calls the HGETALL command.
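
As an illustration of the calling convention (using one of the sample location Hashes rather than the exercise solution), hmget returns an array of values in the same order that the field names were requested, which destructures neatly:

// Inside an async function, with redisClient an ioredis connection:
// HMGET returns values in the order the field names were requested.
const [name, category] = await redisClient.hmget(
  'ncc:locations:106', 'name', 'category',
);
console.log(`${name} is a ${category}.`); // Viva Bubble Tea is a cafe.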

    nodemon will restart the server automatically for you each time you save your changes.

    When you're ready to test your solution, browse to http://localhost:8081/api/user/5/fullname and you should see:

    {
    "fullName": "Alejandro Reyes"
    }

    If you need help from our team, join us in Discord.

    External Resources

    In this video, Justin explains what Redis Hashes are and shows how common Redis Hash commands work:

    You can find documentation for all of the Redis Hash commands on redis.io.

    - + \ No newline at end of file diff --git a/develop/node/nodecrashcourse/introducingredisinsight/index.html b/develop/node/nodecrashcourse/introducingredisinsight/index.html index 9312739eeb..cc6bba1c6f 100644 --- a/develop/node/nodecrashcourse/introducingredisinsight/index.html +++ b/develop/node/nodecrashcourse/introducingredisinsight/index.html @@ -4,7 +4,7 @@ Introducing RedisInsight | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Introducing RedisInsight


    Profile picture for Simon Prickett
    Author:
    Simon Prickett, Principal Developer Advocate at Redis

    RedisInsight is a free product from Redis that provides an intuitive graphical user interface for managing Redis databases. RedisInsight allows you to browse Redis and monitor changes in data in real time. You can edit data stored in existing keys, create and delete new ones, and run redis-cli commands.

    RedisInsight also supports some popular Redis features, and we'll use it in this course to look at data managed by Redis JSON and Search. The data type specific views in RedisInsight make visualizing even the most complex Redis data types easy. We'll benefit from this when we look at Redis Streams later in the course.

    For this course, we'll be running Redis in a Docker container. While you can complete the course using just the redis-cli interface provided with the container, we'd strongly recommend that you download and install RedisInsight to benefit from its graphical interface and specialized views for Redis data types and modules.

    We'll cover how to connect RedisInsight to our Redis server in the "Up and Running with the Sample Application" module shortly.

    External Resources

    - + \ No newline at end of file diff --git a/develop/node/nodecrashcourse/introductiontomodules/index.html b/develop/node/nodecrashcourse/introductiontomodules/index.html index 55123fbd8e..ff303a3d6a 100644 --- a/develop/node/nodecrashcourse/introductiontomodules/index.html +++ b/develop/node/nodecrashcourse/introductiontomodules/index.html @@ -4,7 +4,7 @@ Redis Extensibility | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Redis Extensibility


    Profile picture for Simon Prickett
    Author:
    Simon Prickett, Principal Developer Advocate at Redis

    Redis Modules

    How to extend Redis?

    Redis has a Modules API that allows developers to extend its core functionality with new capabilities. Redis Modules are libraries that can be written in C or other languages (including Rust and Zig). Modules can add new commands and/or data structures to Redis. For example, through the addition of modules, Redis can be extended to become a Timeseries or Graph database, while retaining all of its original key/value store functionality.

    Modules are loaded into the Redis server at startup by modifying the redis.conf configuration file. Application developers then make use of the extra functionality provided by a module simply by calling the module's commands in the same way they would any other Redis command. We'll see how to do this using the ioredis client for Node.js shortly.
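
For commands that a client doesn't wrap natively, ioredis provides a generic call function. Here's a minimal sketch, assuming the JSON capability is loaded and using a hypothetical key:

// Inside an async function, with redisClient an ioredis connection:
// module commands are sent just like any built-in Redis command.
await redisClient.call(
  'JSON.SET', 'ncc:test', '.', JSON.stringify({ hello: 'world' }),
);
const doc = JSON.parse(await redisClient.call('JSON.GET', 'ncc:test', '.'));
console.log(doc.hello); // world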

    Where to find modules?

    The redis.io website has a catalog of available modules. Redis has developed a number of these that extend Redis in different ways, and we'll use some of them in our sample application.

    One way to get started with Redis modules is to use the Redis Stack Docker container from Docker Hub. This is the container that you're using on this course, and it includes all of the following capabilities:

    • Search and Query - a full-featured search and query engine.
    • Time Series - a timeseries database.
    • JSON - adds a native JSON data type to Redis.
    • Probabilistic - adds native Bloom and Cuckoo filter data types to Redis, plus other probabilistic data structures.

    Redis offers Redis Enterprise Cloud, a fully managed service for running and scaling Redis and Redis Stack. Sign up for Redis Enterprise Cloud and use the full-featured free tier to try it out!

    Using Redis Stack in our Application

    Our social checkin application uses three Redis Stack capabilities:

    • We'll use Search and Query to index our user and location Hashes, giving us the ability to perform queries such as:
      • "Which user is associated with the email address sara.olsen@example.com?"
      • "Find me the users with the most recent checkins".
      • "Find all restaurants within a 3 mile radius of my location that have at least a 3 star rating"
    • JSON adds commands to store and manipulate JSON documents. We'll use those to retrieve extra detail about each of our locations.
    • And finally, we'll take advantage of a space efficient Probabilistic Bloom filter to stop users from posting duplicate checkins.

    In the next section, we'll get to grips with Redis JSON...

    External Resources

Check out these links to learn more about which modules are available for Redis and how to use the modules API to create your own:

    - + \ No newline at end of file diff --git a/develop/node/nodecrashcourse/managingsuccess/index.html b/develop/node/nodecrashcourse/managingsuccess/index.html index 7a6087a3ee..00fc8d50d1 100644 --- a/develop/node/nodecrashcourse/managingsuccess/index.html +++ b/develop/node/nodecrashcourse/managingsuccess/index.html @@ -4,7 +4,7 @@ Managing Success with Redis | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Managing Success with Redis


    Profile picture for Simon Prickett
    Author:
    Simon Prickett, Principal Developer Advocate at Redis

    We launched our checkin application, and it's been an incredible success! Users love it and are checking in to all of the locations that signed up to be part of it!

    In the next few modules, we'll look at how Redis can help us manage growth as we gain more users, process more checkins, and increase our usage of external services.

    We'll look at:

    • Using Redis as a cache in an Express application.
    • Scaling horizontally by using Redis as a session store for Express.
    • Scaling checkin processing with Redis Streams consumer groups.
    • Preventing duplicate checkins using probabilistic data structures.
    - + \ No newline at end of file diff --git a/develop/node/nodecrashcourse/redisandnodejs/index.html b/develop/node/nodecrashcourse/redisandnodejs/index.html index 2db3e82c87..8e8fb17cb2 100644 --- a/develop/node/nodecrashcourse/redisandnodejs/index.html +++ b/develop/node/nodecrashcourse/redisandnodejs/index.html @@ -4,7 +4,7 @@ Using Redis from Node.js | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Using Redis from Node.js


    Profile picture for Simon Prickett
    Author:
    Simon Prickett, Principal Developer Advocate at Redis

    To connect to Redis from an application, we need a Redis client library for the language that we're coding in. Redis clients perform the following functions:

    • Manage the connections between our application and the Redis server.
    • Handle network communications to the Redis server using Redis' wire protocol.
    • Provide a language specific API that we use in our application.

For Node.js, there are two popular Redis clients: ioredis and node_redis. Both clients expose similar programming APIs, wrapping each Redis command as a function that we can call in a Node.js script. For this course, we'll use ioredis, which has built-in support for modern JavaScript features such as Promises.

    Here's a complete Node.js script that uses ioredis to perform the SET and GET commands that we previously tried in redis-cli:

    const Redis = require('ioredis');

    const redisDemo = async () => {
    // Connect to Redis at 127.0.0.1, port 6379.
    const redisClient = new Redis({
    host: '127.0.0.1',
    port: 6379,
    });

    // Set key "myname" to have value "Simon Prickett".
    await redisClient.set('myname', 'Simon Prickett');

    // Get the value held at key "myname" and log it.
    const value = await redisClient.get('myname');
    console.log(value);

    // Disconnect from Redis.
    redisClient.quit();
    };

    redisDemo();

    ioredis wraps each Redis command in a function that can either accept a callback or return a Promise. Here, I'm using async/await to wait for each command to be executed on the Redis server before moving on to the next.
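
The same command can be used callback-style or Promise-style too; for example:

// Callback style: the result arrives in a Node.js-style callback...
redisClient.get('myname', (err, value) => {
  if (err) throw err;
  console.log(value); // Simon Prickett
});

// ...or Promise style, chaining .then() instead of using await.
redisClient.get('myname').then((value) => console.log(value));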

    Running this code displays the value that's now stored in Redis:

    $ node basic_set_get.js
    Simon Prickett

    External Resources

    The following additional resources can help you understand how to access Redis from a Node.js application:

    • ioredis: Home page for the ioredis client.
    • node_redis: Home page for the node_redis client.
    • RU102JS, Redis for JavaScript Developers: A free online course at Redis University that provides a deep dive into Redis for Node.js applications. You can expect to learn how to make connections to Redis, store and retrieve data, and leverage essential Redis features such as sorted sets and streams.
    • Redis clients by programming language: A large list of Redis clients at redis.io.

    In this video, I take a look at how to get up and running with the ioredis client:

    - + \ No newline at end of file diff --git a/develop/node/nodecrashcourse/redisbloom/index.html b/develop/node/nodecrashcourse/redisbloom/index.html index 4661639c4a..fd2a97cb7c 100644 --- a/develop/node/nodecrashcourse/redisbloom/index.html +++ b/develop/node/nodecrashcourse/redisbloom/index.html @@ -4,7 +4,7 @@ Preventing Duplicate Checkins with Redis | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Preventing Duplicate Checkins with Redis


    Profile picture for Simon Prickett
    Author:
    Simon Prickett, Principal Developer Advocate at Redis

    As our application grows in popularity, we're getting more and more checkins from our expanding user base. We've decided that we want to limit this a bit, and only allow each user to give each location a particular star rating once. For example, if user 100 checks in at location 73 and rates it 3 stars, we want to reject any further 3 star checkins from them at that location.

    In order to do this, we need a way of remembering each checkin, and to quickly determine if we've seen it before. We can't do this by querying the data in our checkins stream, as streams don't allow that sort of access and are periodically trimmed, removing older checkins that have been processed and are no longer needed.

We can represent our checkins in the form <userId>:<locationId>:<rating>. With this schema, the string 100:73:3 would represent user 100's checkin at location 73 with a 3-star rating.

    We then need to remember each checkin, so that we can ensure it's a unique combination of user ID, location ID, and star rating. We could use a Redis Set for this. Sets are great whenever we want to maintain a collection of unique members, as they don't allow duplicates. With a Redis Set, adding a new member and checking whether a member is in the Set are both O(1) operations, meaning their performance doesn't degrade as the Set grows.

    However, every new member of a Set that we add (in our case these are unique checkins) causes the Set to take up more memory on the Redis server. And this growth will become an issue as we continue to receive more and more checkins.

But what if there were a way to check whether a potential new member is already in a Set without this memory consumption issue? A Bloom Filter is a space-efficient probabilistic data structure that can help here. Bloom Filters store hashed representations of the members of a Set rather than the actual member data itself. Unlike a Set, we can't get members back out of the Bloom Filter, but we can test if something is already in there or not... with some false positives due to hash collisions. When asked if something is a member of a Set, the Bloom Filter can tell us "no it isn't", or "it's likely that it is".

This hashing approach sacrifices the 100% accuracy we'd get with a Set to dramatically reduce the memory overhead. Bloom Filters can be configured with a desired acceptable error rate, so for our application this seems like a good way to enforce our "no duplicate checkins" rule without having a runaway memory consumption problem. Whenever the Bloom Filter tells us it may have seen a checkin before, it will mostly be correct, and we'll accept that we sometimes disallow a checkin we actually haven't seen before as a sensible tradeoff for keeping our memory usage under control.

    Redis Stack provides a Bloom Filter implementation for Redis, along with other useful probabilistic data structures. In the video, you'll see how easy this is to use in a Node.js application, with no math skills required!
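
Here's a minimal sketch of the check-then-add pattern with ioredis, using the filter key from this course; the real Checkin Receiver logic is in the repo and may differ:

// Sketch: reject a checkin if the Bloom Filter has (probably) seen it before.
const isDuplicateCheckin = async (userId, locationId, starRating) => {
  const checkin = `${userId}:${locationId}:${starRating}`;

  // BF.EXISTS returns 1 if the value is probably in the filter,
  // 0 if it is definitely not.
  const seenBefore = await redisClient.call(
    'BF.EXISTS', 'ncc:checkinfilter', checkin,
  );

  if (seenBefore === 1) {
    return true; // Probably a duplicate: respond with 422 Unprocessable Entity.
  }

  // Remember this checkin for next time.
  await redisClient.call('BF.ADD', 'ncc:checkinfilter', checkin);
  return false;
};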

    Hands-on Exercise

    In this exercise, you'll see the Bloom filter in action by attempting to submit the same checkin to the system more than once.

You'll need to be running the Checkin Receiver Service... stop it with Ctrl-C if it's still running from a previous exercise. Then, restart it using the following command, which disables the login requirement that we don't want for this exercise:

    $ npm run checkinreceiver

    > js-crash-course@0.0.1 checkinreceiver
    > node ./src/checkinreceiver.js

    info: Authentication disabled, checkins do not require a valid user session.
    info: Checkin receiver listening on port 8082.

    Now, open Postman and create a new request, selecting "POST" as the HTTP verb.

    • Set the URL to localhost:8082/api/checkin
    • In the "Body" tab, set the type dropdowns to "raw" and "JSON"
    • In the body text area, enter the following JSON:
    { "userId": 100, "locationId": 73, "starRating": 3 }

    Your request should look like this:

    Checkin Request

    Click "Send" to submit your checkin to the Checkin Receiver, which should respond with a 202 Accepted status and empty response body:

    202 Checkin Response

    Click "Send" a second time and you should receive a 422 Unprocessable Entity response from the Checkin Receiver along with an error message:

    422 Checkin Response

    With the Checkin Receiver service still running, start the Checkin Generator utility that generates random checkins:

    node-js-crash-course $ npm run checkingenerator

    > js-crash-course@0.0.1 checkingenerator
    > node ./src/checkingenerator.js

    info: Started checkin generator.

    Leave the Checkin Generator running. It will generate a new random checkin every few seconds. Let it run and generate a few hundred checkins. While it's doing that, periodically monitor the memory usage required by the Bloom Filter using redis-cli or the CLI tab in RedisInsight:

    127.0.0.1:6379> bf.info ncc:checkinfilter
    1) Capacity
    2) (integer) 1000000
    3) Size
    4) (integer) 2576760
    5) Number of filters
    6) (integer) 1
    7) Number of items inserted
    8) (integer) 269
    9) Expansion rate
    10) (integer) 2

    Run this a few times as more checkins are generated, and note that the size required to store the Bloom Filter doesn't increase as the number of items inserted increases. While sacrificing some accuracy, Bloom Filters are a storage efficient solution for this type of use case.

    External Resources

    In this video, Guy Royse explains what Bloom Filters are and how to use them in Redis:

    Redis Sets are a powerful data type to know about, learn more with Andrew's two videos on the Redis University YouTube channel. First, Redis Sets Explained:

    Followed by Redis Sets Elaborated:

    - + \ No newline at end of file diff --git a/develop/node/nodecrashcourse/redisearch/index.html b/develop/node/nodecrashcourse/redisearch/index.html index bbcd8e590f..78afad18ef 100644 --- a/develop/node/nodecrashcourse/redisearch/index.html +++ b/develop/node/nodecrashcourse/redisearch/index.html @@ -4,7 +4,7 @@ Indexing and Querying with Redis | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Indexing and Querying with Redis


    Profile picture for Simon Prickett
    Author:
    Simon Prickett, Principal Developer Advocate at Redis

    We chose to store our user and location data in Redis Hashes. Hashes are a great fit for storing domain objects. Recall that we've chosen to store each user in a Hash whose key contains the user ID. For example, here's user 852 as seen in RedisInsight:

    User 852 viewed with RedisInsight

    If you're using redis-cli, you can look at user 852 with the HGETALL command:

    127.0.0.1:6379> hgetall ncc:users:852
    1) "id"
    2) "852"
    3) "firstName"
    4) "Dominik"
    5) "lastName"
    6) "Schiffmann"
    7) "email"
    8) "dominik.schiffmann@example.com"
    9) "password"
    10) "$2b$05$xbkSwODz1tWqdE7xWb393eiYIQcdiEdbbvhK88.Xr9sW7WxdI26qi"
    11) "numCheckins"
    12) "9353"
    13) "lastCheckin"
    14) "1488517098363"
    15) "lastSeenAt"
    16) "124"

    Storing data in Hashes means that we can easily and efficiently retrieve the contents of the Hash, provided that we know the key. So it's trivial to look up user 852, but how can we perform any of the following operations?

    • Get the user whose email address is dominik.schiffmann@example.com.
    • Find all users that were last seen at location 124.
    • Find all the users who have between 1000 and 3000 checkins.
    • Find all locations within a 10 mile radius of a given latitude / longitude coordinate and which have at least a 3 star rating.

    Redis is a key/value database. This means that its data model is optimized for retrieval by key. The queries above can't be resolved by knowing just the Hash key - we need some other mechanism to index our data.

    Traditionally in a key/value database, this has meant adding code to create and manually update indexes. For example to resolve the query "which user has the email address dominik.schiffmann@example.com", we might create a new String key containing that email address, with the value being the user's ID:

    127.0.0.1:6379> set ncc:users:byemail:dominik.schiffmann@example.com 852
    OK

    Now, if we want to get Dominik's user details given only his email address, we have a two step process to follow:

    1. Look up the user ID for the user associated with the email address we have.
    2. Use that user ID to retrieve the values from the user's Hash.
    127.0.0.1:6379> get ncc:users:byemail:dominik.schiffmann@example.com
    "852"
    127.0.0.1:6379> hgetall ncc:users:852
    1) "id"
    2) "852"
    3) "firstName"
    4) "Dominik"
    5) "lastName"
    6) "Schiffmann"
    7) "email"
    8) "dominik.schiffmann@example.com"
    9) "password"
    10) "$2b$05$xbkSwODz1tWqdE7xWb393eiYIQcdiEdbbvhK88.Xr9sW7WxdI26qi"
    11) "numCheckins"
    12) "9353"
    13) "lastCheckin"
    14) "1488517098363"
    15) "lastSeenAt"
    16) "124"

    We'd also need to keep this information up to date and in sync with changes to the Hash at ncc:users:852 ourselves in our application code.

    Other sorts of secondary indexes can be created using other Redis data types. For example, we might use a Redis Sorted Set as a secondary index, allowing us to perform range queries such as "Find all the users who have between 1000 and 3000 checkins". Again, we'd have to populate and maintain this extra data structure ourselves in the application code.
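
For example, a manually maintained Sorted Set index over checkin counts might look like this sketch; the ncc:users:bycheckins key is hypothetical, purely for illustration:

// Inside an async function, with redisClient an ioredis connection.
// Maintain the index ourselves whenever a user's checkin count changes:
// member = user ID, score = numCheckins.
await redisClient.zadd('ncc:users:bycheckins', 9353, '852');

// Range query: IDs of users with between 1000 and 3000 checkins.
const userIds = await redisClient.zrangebyscore(
  'ncc:users:bycheckins', 1000, 3000,
);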

    Redis Stack solves all of these problems for us and more. It adds an indexing, querying and full-text search engine to Redis that automatically keeps track of changes to data in indexed Hashes. Redis Stack provides a flexible query language to answer questions such as "Find me all the gyms with at least a 3 star rating and more than 200 checkins within 10 miles of Oakland, California" without adding code to build or maintain secondary data structures in our application.

    Watch the video to see how Redis Stack is used in our example Node.js application.

    Coding Exercise

    In this exercise, you'll finish implementing a route that uses Redis to return all users whose last checkin was at a given location.

    Open the node-js-crash-course folder with your IDE, and find the file src/routes/user_routes.js.

    In this file, you'll see a partly implemented route /users/at/:locationId. To complete this exercise, you'll need to replace this line:

    const searchResults = await redis.performSearch(
    redis.getKeyName('usersidx'),
    'TODO... YOUR QUERY HERE',
    );

    with one containing the correct query to return users whose "lastSeenAt" field is set to the value of locationId. You'll need to use the "numeric range" syntax for this, as the "lastSeenAt" field was indexed as a number. Be sure to check out the Query Syntax documentation for Redis to get help with this.
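
As a syntax illustration only (not the exercise answer), a numeric range query for users with between 1000 and 3000 checkins would look like the sketch below, assuming the numCheckins field was also indexed as a number:

// Assumption: numCheckins is indexed as a numeric field in usersidx.
// "@field:[min max]" matches documents whose field is in the inclusive range.
const searchResults = await redis.performSearch(
  redis.getKeyName('usersidx'),
  '@numCheckins:[1000 3000]',
);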

    To try your code, ensure that the API Server component is running:

    $ npm run dev

    (remember, this will use nodemon to restart the server any time you save a code change).

    Then, point your browser at http://localhost:8081/api/users/at/33. If your query is correct, you should see output similar to the following (actual users may differ, just ensure that the value of lastSeenAt for each matches the location ID you provided - 33 in this case):

    [
    {
    "id": "238",
    "firstName": "Jonas",
    "lastName": "Nielsen",
    "numCheckins": "7149",
    "lastCheckin": "1515248028256",
    "lastSeenAt": "33"
    },
    {
    "id": "324",
    "firstName": "Frans",
    "lastName": "Potze",
    "numCheckins": "8623",
    "lastCheckin": "1515976232073",
    "lastSeenAt": "33"
    },
    ...
    ]

    To help you develop your query, use one of the guides in RedisInsight workbench, or read more about the FT.SEARCH command.

    External Resources

    Querying, Index, and Full-Text Search in Redis:

    Finding Bigfoot RESTfuly with Express + Redis Stack:

    Other resources:

    - + \ No newline at end of file diff --git a/develop/node/nodecrashcourse/redisjson/index.html b/develop/node/nodecrashcourse/redisjson/index.html index 052bda78e3..8c720dbb2d 100644 --- a/develop/node/nodecrashcourse/redisjson/index.html +++ b/develop/node/nodecrashcourse/redisjson/index.html @@ -4,7 +4,7 @@ Managing Document Data with JSON | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Managing Document Data with JSON


    Profile picture for Simon Prickett
    Author:
    Simon Prickett, Principal Developer Advocate at Redis

    We used Redis' built-in Hash data type to represent our user and location entities. Hashes are great for this, but they are limited in that they can only contain flat name/value pairs. For our locations, we want to store extra details in a more structured way.

    Here's an example of the additional data we want to store about a location:

    {
    "id": 121,
    "hours": [
    { "day": "Monday", "hours": "6-7" },
    { "day": "Tuesday", "hours": "6-7" },
    { "day": "Wednesday", "hours": "7-8" },
    { "day": "Thursday", "hours": "6-9" },
    { "day": "Friday", "hours": "8-5" },
    { "day": "Saturday", "hours": "9-6" },
    { "day": "Sunday", "hours": "6-4" }
    ],
    "socials": [
    {
    "instagram": "theginclub",
    "facebook": "theginclub",
    "twitter": "theginclub"
    }
    ],
    "website": "www.theginclub.com",
    "description": "Lorem ipsum...",
    "phone": "(318) 251-0608"
    }

    We could store this data as serialized JSON in a Redis String, but then our application would have to retrieve and parse the entire document every time it wanted to read some of the data. And we'd have to do the same to update it too. Furthermore, with this approach, update operations aren't atomic and a second client could update the JSON stored at a given key while we're making changes to it in our application code. Then, when we serialize our version of the JSON back into the Redis String, the other client's changes would be lost.

    Redis Stack adds a new JSON data type to Redis, and a query syntax for selecting and updating individual elements in a JSON document atomically on the Redis server. This makes our application code simpler, more efficient, and much more reliable.
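
For example, fetching a single field of the location details document shown above, rather than the whole document, looks like this sketch (matching the redisClient.call style used in this course's code):

// Inside an async function: ask the Redis server for just the website field;
// only "www.theginclub.com" travels over the network, not the whole document.
const website = JSON.parse(
  await redisClient.call('JSON.GET', 'ncc:locationdetails:121', '.website'),
);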

    Coding Exercise

    In this exercise, you'll complete the code for an API route that gets just the object representing a location's opening hours for a given day. Open the file src/routes/location_routes.js, and find the route for /location/:locationId/hours/:day. The starter code looks like this:

    // EXERCISE: Get opening hours for a given day.
    router.get(
    '/location/:locationId/hours/:day',
    [
    param('locationId').isInt({ min: 1 }),
    param('day').isInt({ min: 0, max: 6 }),
    apiErrorReporter,
    ],
    async (req, res) => {
    /* eslint-disable no-unused-vars */
    const { locationId, day } = req.params;
    /* eslint-enable */
    const locationDetailsKey = redis.getKeyName('locationdetails', locationId);

    // TODO: Get the opening hours for a given day from
    // the JSON stored at the key held in locationDetailsKey.
    // You will need to provide the correct JSON path to the hours
    // array and return the element held in the position specified by
    // the day variable. Make sure Redis JSON returns only the day
    // requested!
    const jsonPath = 'TODO';

    /* eslint-enable no-unused-vars */
    const hoursForDay = JSON.parse(
    await redisClient.call('JSON.GET', locationDetailsKey, jsonPath),
    );
    /* eslint-disable */

    // If null response, return empty object.
    res.status(200).json(hoursForDay || {});
    },
    );

    You'll need to update the code to provide the correct JSON path, replacing the "TODO" value with a JSON path expression.

    Looking at the JSON stored at key ncc:locationdetails:121, we see that the opening hours are stored as an array of objects in a field named hours, where day 0 is Monday and day 6 is Sunday:

    Location Details in RedisInsight

    So you'll need a JSON path query that gets the right element from the hours array depending on the value stored in the variable day.

    If you're using redis-cli, you can look at the structure of the JSON document with the following command:

    json.get ncc:locationdetails:121 .

    Make sure your query returns only the day requested, so that you don't have to write Node.js code to filter the value returned from Redis. Use the JSON path syntax page to help you formulate the right query.

    To test your code, start the server with:

    $ npm run dev

    Recall that this will allow you to edit the code and try your changes without restarting the server.

If you have the correct JSON path in your code, visiting http://localhost:8081/api/location/121/hours/2 should return:

    {
    "day": "Wednesday",
    "hours": "7-8"
    }

    Don't forget that if you have questions or need help, we're available on Discord.

    External Resources

    In this video, Justin introduces JSON using a fun taco truck example!

    Learn more about JSON at https://redis.io/docs/stack/json/.

    - + \ No newline at end of file diff --git a/develop/node/nodecrashcourse/runningtheapplication/index.html b/develop/node/nodecrashcourse/runningtheapplication/index.html index 9e0296e3c3..8d1804f0c4 100644 --- a/develop/node/nodecrashcourse/runningtheapplication/index.html +++ b/develop/node/nodecrashcourse/runningtheapplication/index.html @@ -4,7 +4,7 @@ Up and Running with the Sample Application | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Up and Running with the Sample Application


    Profile picture for Simon Prickett
    Author:
    Simon Prickett, Principal Developer Advocate at Redis

Let's get hands-on: clone the application repository from GitHub, start up Redis in a Docker container, and load the sample data!

    Reminder - Software Prerequisites

    To get the most from this course, you'll need a machine that can run the application and the Redis server. The application runs directly on your machine and the Redis server runs in a Docker container.

    You'll need the following installed on your machine:

    • Docker (you'll need the docker-compose command)
    • Node.js (use the current Long Term Stable - LTS - version)
    • git command line tools
    • Your favorite IDE (we like VSCode, but anything you're comfortable with works)
    • Postman - we're going to make some API calls and Postman makes that easy.

    Setup / Installation Process

    Get the Code and Install Dependencies

    Clone the course repo from GitHub and install the dependencies:

    $ git clone https://github.com/redislabs-training/node-js-crash-course.git
    $ cd node-js-crash-course
    $ npm install

    Start Redis (Docker)

    From the node-js-crash-course directory, start Redis using docker-compose:

    $ docker-compose up -d
    Creating network "node-js-crash-course_default" with the default driver
    Creating rediscrashcourse ... done
    $ docker ps

    The output from the docker ps command should show one container running, using the "redis/redis-stack" image. This container runs Redis with the Search, JSON, Time Series, and Probabilistic data structures.

    Load the Sample Data into Redis

    Load the course example data using the provided data loader. This is a Node.js application:

    $ npm run load all
    > node src/utils/dataloader.js -- "all"

    Loading user data...
    User data loaded with 0 errors.
    Loading location data...
    Location data loaded with 0 errors.
    Loading location details...
    Location detail data loaded with 0 errors.
    Loading checkin stream entries...
    Loaded 5000 checkin stream entries.
    Creating consumer group...
    Consumer group created.
    Dropping any existing indexes, creating new indexes...
    Created indexes.
    Deleting any previous bloom filter, creating new bloom filter...
    Created bloom filter.

    In another terminal window, run the redis-cli executable that's in the Docker container. Then, enter the Redis commands shown at the redis-cli prompt to verify that data loaded successfully:

    $ docker exec -it rediscrashcourse redis-cli
    127.0.0.1:6379> hgetall ncc:locations:106
    1) "id"
    2) "106"
    3) "name"
    4) "Viva Bubble Tea"
    5) "category"
    6) "cafe"
    7) "location"
    8) "-122.268645,37.764288"
    9) "numCheckins"
    10) "886"
    11) "numStars"
    12) "1073"
    13) "averageStars"
    14) "1"
    127.0.0.1:6379> hgetall ncc:users:12
    1) "id"
    2) "12"
    3) "firstName"
    4) "Franziska"
    5) "lastName"
    6) "Sieben"
    7) "email"
    8) "franziska.sieben@example.com"
    9) "password"
    10) "$2b$05$uV38PUcdFD3Gm6ElMlBkE.lzZutqWVE6R6ro48GsEjcmnioaZZ55C"
    11) "numCheckins"
    12) "8945"
    13) "lastCheckin"
    14) "1490641385511"
    15) "lastSeenAt"
    16) "22"
    127.0.0.1:6379> xlen ncc:checkins
    (integer) 5000

    Start and Configure RedisInsight

    If you're using RedisInsight, start it up and it should open in your browser automatically. If not, point your browser at http://localhost:8001.

    If this is your first time using RedisInsight click "I already have a database".

    If you already have other Redis databases configured in RedisInsight, click "Add Redis Database".

    Now, click "Connect to a Redis Database Using hostname and port". Configure the database details as shown below, then click "Add Redis Database".

    Configuring RedisInsight

    You should now be able to browse your Redis instance. If you need more guidance on how to connect to Redis from RedisInsight, check out Justin's video below but be sure to use 127.0.0.1 as the host, 6379 as the port and leave the username and password fields blank when configuring your database.

    Start the Application

    Now it's time to start the API Server component of the application and make sure it connects to Redis. This component listens on port 8081.

    If port 8081 is in use on your system, edit this section of the config.json file and pick another available port:

    "application": {
    "port": 8081
    },

    Start the server like this:

    $ npm run dev

    > ./node_modules/nodemon/bin/nodemon.js

    [nodemon] 2.0.7
    [nodemon] to restart at any time, enter `rs`
    [nodemon] watching path(s): *.*
    [nodemon] watching extensions: js,mjs,json
    [nodemon] starting `node src/server.js`
    Warning: Environment variable WEATHER_API_KEY is not set!
    info: Application listening on port 8081.

    This starts the application using nodemon, which monitors for changes in the source code and will restart the server when a change is detected. This will be useful in the next module where you'll be making some code changes.

    Ignore the warning about WEATHER_API_KEY — we'll address this in a later exercise when we look at using Redis as a cache.

    To verify that the server is running correctly and connected to Redis, point your browser at:

    http://localhost:8081/api/location/200

    You should see the summary information for location 200, Katia's Kitchen:

{
  "id": "200",
  "name": "Katia's Kitchen",
  "category": "restaurant",
  "location": "-122.2349598,37.7356811",
  "numCheckins": "359",
  "numStars": "1021",
  "averageStars": "3"
}

    Great! Now you're up and running. Let's move on to the next module and see how we're using Redis Hashes in the application. You'll also get to write some code!

    Stopping redis-cli, the Redis Container and the Application

    Don't do this now, as we’ve only just started! However, when you do want to shut everything down, here's how to do it...

    To stop running redis-cli, simply enter the quit command at the redis-cli prompt:

    127.0.0.1:6379> quit
    $

To stop the Redis Server, make sure you are in the node-js-crash-course folder that you cloned the application repo into, then:

    $ docker-compose down
    Stopping rediscrashcourse ... done
    Removing rediscrashcourse ... done
    Removing network node-js-crash-course_default

    Redis persists data to the "redisdata" folder. If you want to remove this, just delete it:

    $ rm -rf redisdata

    To stop each of the application's components, press Ctrl+C in the terminal window that the component is running in. For example, to stop the API server:

    $ npm run dev

    > ./node_modules/nodemon/bin/nodemon.js

    [nodemon] 2.0.7
    [nodemon] to restart at any time, enter `rs`
    [nodemon] watching path(s): *.*
    [nodemon] watching extensions: js,mjs,json
    [nodemon] starting `node src/server.js`
    info: Application listening on port 8081.
    ^C
    node-js-crash-course $

    Sample Application Overview


    Profile picture for Simon Prickett
    Author:
    Simon Prickett, Principal Developer Advocate at Redis

    In this course, we'll look at how to use Redis as a data store and cache in the context of a sample application. Imagine that we're building a sort of social network application where users can "check in" at different locations and give them a star rating… from 0 for an awful experience through 5 to report that they had the best time ever there!

    When designing our application, we determined that there's a need to manage data about three main entities:

    • Users
    • Locations
    • Checkins

    Let's look at what we're storing about each of these entities. As we're using Redis as our only data store, we'll also consider how they map to Redis data types...

    Users

    We'll represent each user as a flat map of name/value pairs with no nested objects. As we'll see later on, this maps nicely to a Redis Hash. Here's a JSON representation of the schema we'll use to represent each user:

{
  "id": 99,
  "firstName": "Isabella",
  "lastName": "Pedersen",
  "email": "isabella.pedersen@example.com",
  "password": "xxxxxx1",
  "numCheckins": 8073,
  "lastCheckin": 1544372326893,
  "lastSeenAt": 138
}

We've given each user an ID and we're storing basic information about them. Also, we'll hash their password using bcrypt when we load the sample data into Redis.
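As a sketch of what that hashing step looks like (assuming the Node.js bcrypt package; the cost factor of 5 matches the $2b$05$ prefix you'll see on the password hashes in the sample data):

const bcrypt = require('bcrypt');

async function hashPassword(plainTextPassword) {
  // The second argument is the cost factor: 5 matches the $2b$05$
  // prefix visible in the sample data's password hashes.
  return bcrypt.hash(plainTextPassword, 5);
}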

    For each user, we'll keep track of the total number of checkins that they've submitted to the system, and the timestamp and location ID of their most recent checkin so that we know where and when they last used the system.

    Locations

    For each location that users can check in at, we're going to maintain two types of data. The first of these is also a flat map of name/value pairs, containing summary information about the location:

{
  "id": 138,
  "name": "Stacey's Country Bakehouse",
  "category": "restaurant",
  "location": "-122.195447,37.774636",
  "numCheckins": 170,
  "numStars": 724,
  "averageStars": 4
}

We've given each location an ID and a category—we'll use the category to search for locations by type later on. The "location" field stores the coordinates in longitude, latitude format… this is the opposite of the usual latitude, longitude ordering. We'll see how to use this to perform geospatial searches later when we look at Redis Search.

For each location, we're also storing the total number of checkins that have been recorded there by all of our users, the total number of stars that those checkins gave the location, and an average star rating per checkin for the location (for example, 724 stars across 170 checkins works out to an averageStars value of 4).

    The second type of data that we want to maintain for each location is what we'll call "location details". These take the form of more structured JSON documents with nested objects and arrays. Here's an example for location 138, Stacey's Country Bakehouse:

{
  "id": 138,
  "hours": [
    { "day": "Monday", "hours": "8-7" },
    { "day": "Tuesday", "hours": "9-7" },
    { "day": "Wednesday", "hours": "6-8" },
    { "day": "Thursday", "hours": "6-6" },
    { "day": "Friday", "hours": "9-5" },
    { "day": "Saturday", "hours": "8-9" },
    { "day": "Sunday", "hours": "7-7" }
  ],
  "socials": [
    {
      "instagram": "staceyscountrybakehouse",
      "facebook": "staceyscountrybakehouse",
      "twitter": "staceyscountrybakehouse"
    }
  ],
  "website": "www.staceyscountrybakehouse.com",
  "description": "Lorem ipsum....",
  "phone": "(316) 157-8620"
}

    We want to build an API that allows us to retrieve all or some of these extra details, and keep the overall structure of the document intact. For that, we'll need Redis with JSON support as we'll see later.

    Checkins

    Checkins differ from users and locations in that they're not entities that we need to store forever. In our application, checkins consist of a user ID, a location ID, a star rating and a timestamp - we'll use these values to update attributes of our users and locations.

    Each checkin can be thought of as a flat map of name/value pairs, for example:

{
  "userId": 789,
  "locationId": 171,
  "starRating": 5
}

    Here, we see that user 789 visited location 171 ("Hair by Parvinder") and was really impressed with the service.

    We need a way to store checkins for long enough to process them, but not forever. We also need to associate a timestamp with each one, as we'll need that when we process the data.

    Redis provides a Stream data type that's perfect for this - with Redis Streams, we can store maps of name/value pairs and have the Redis server timestamp them for us. Streams are also perfect for the sort of asynchronous processing we want to do with this data. When a user posts a new checkin to our API we want to store that data and respond to the user that we've received it as quickly as possible. Later we can have one or more other parts of the system do further processing with it. Such processing might include updating the total number of checkins and last seen at fields for a user, or calculating a new average star rating for a location.
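To make that concrete, here's a minimal sketch of how a checkin could be added to the stream with ioredis (the ncc:checkins key matches the course data; the function itself is illustrative, not the course's exact code):

const Redis = require('ioredis');
const redis = new Redis(); // connects to localhost:6379 by default

async function receiveCheckin(checkin) {
  // '*' asks Redis to generate a timestamp-based ID for the new entry.
  return redis.xadd(
    'ncc:checkins',
    '*',
    'userId', String(checkin.userId),
    'locationId', String(checkin.locationId),
    'starRating', String(checkin.starRating),
  );
}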

    Application Architecture

    We decided to use Node.js with the Express framework and ioredis client to build the application. Rather than have a monolithic codebase, the application has been split out into four components or services. These are:

    • Authentication Service: Listens on an HTTP port and handles user authentication using Redis as a shared session store that other services can access.
    • Checkin Receiver: Listens on an HTTP port and receives checkins as HTTP POST requests from our users. Each checkin is placed in a Redis Stream for later processing.
    • Checkin Processor: Monitors the checkin Stream in Redis, updating user and location information as it processes each checkin.
    • API Server: Implements the bulk of the application's API endpoints, including those to retrieve information about users and locations from Redis.

    These components fit together like so:

    Application Architecture

    There's also a data loader component, which we'll use to load some initial sample data into the system.

    As we progress through the course, we'll look at each of these components in turn. In the next module, you'll get hands on and clone the application repo, start a Redis server with Docker, and load the sample data.

External Resources


    Scaling an Express Application with Redis as a Session Store


    Profile picture for Simon Prickett
    Author:
    Simon Prickett, Principal Developer Advocate at Redis

    We're building our application as a series of components, with Redis as a data store and cache. Most of these components talk to the user via the HTTP request / response cycle, which is inherently stateless. However, we want to maintain state, or remember things about the user from one request to the next in some circumstances.

    We can identify users by having the application set a cookie in the response that is returned to the user - their browser will then attach this cookie to future requests. This can then be used to store information about the user in a server-side session object that Express keeps track of by means of the ID in the cookie.

You can think of the cookie as a sort of key, and the session object as a value. We could store these in memory in our server, but this approach limits us to a single server instance that would lose all of its session data if it were to crash and restart.

    Fortunately, Redis makes an excellent store for session data - it's fast and durable, and allows us to scale system components horizontally by adding more instances of them. We've used the npm package "connect-redis" to add Redis as a session store for the Authentication and Checkin Receiver services, with minimal code required.
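For a sense of what that wiring looks like, here's a minimal sketch assuming express-session, the classic connect-redis v3/v4 API, and an ioredis client (the option values are illustrative, not the course's exact configuration):

const express = require('express');
const session = require('express-session');
const RedisStore = require('connect-redis')(session);
const Redis = require('ioredis');

const app = express();

app.use(
  session({
    // Sessions live in Redis under keys like ncc:session:<session id>.
    store: new RedisStore({ client: new Redis(), prefix: 'ncc:session:' }),
    name: 'checkinapp', // the cookie name you'll see later in Postman
    secret: 'change-me', // illustrative secret, not the course's value
    resave: false,
    saveUninitialized: true,
  }),
);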

    Hands-on Exercise

    In this exercise, you'll enable the authentication functionality in the Checkin Receiver service, and use Postman to establish a session with the Authentication service in order to submit an authenticated checkin to the system.

    First, stop the Checkin Receiver if you have it running. Press Ctrl-C in its terminal window.

    Now, restart the Checkin Receiver with the optional authentication functionality enabled:

    $ npm run checkinreceiver auth

    > js-crash-course@0.0.1 checkinreceiver
    > node ./src/checkinreceiver.js "auth"

    info: Authentication enabled, checkins require a valid user session.
    info: Checkin receiver listening on port 8082.

    Note that the Checkin Receiver reports that authentication is now enabled.

    In a second terminal window, cd to the node-js-crash-course directory that you cloned the project repo into, and start the Authentication Service which will listen on port 8083:

    $ npm run auth

    > js-crash-course@0.0.1 auth /Users/simonprickett/source/github/node-js-crash-course
    > node ./src/auth.js

    info: Authentication service listening on port 8083.

    Open up a new request in Postman, and make a POST request to perform a checkin as follows:

    Authenticated Checkin Attempt

    Click "Send" - this time, the Checkin Receiver service should reject the request as you haven't authenticated with the system. You should see a 401 Unauthorized response:

    Checkin Attempt 401 Response

    And in the Checkin Receiver's terminal window, you should see that it rejected your request as it couldn't find your session:

    debug: Rejecting checkin - no valid user session found.

    The Checkin Receiver rejected your request because it couldn't find a value for 'user' in your request's session (check out the code in src/checkinreceiver.js).

    Let's try again, but this time we'll login first. This will establish a session and we'll be able to see how that gets stored in Redis…

    Keep your checkin request tab in Postman, and open a new tab to create a POST request to the Authentication service.

    Set the URL to localhost:8083/login and the JSON body to:

    { "email": "al.appelhof@example.com", "password": "secret123" }

    Your request should look like this:

    Login Request

    Click "Send" to login. The Authentication Service will check the supplied credentials against what's stored in Redis at key ncc:user:77 and create a session object for that user, adding the email address to the "user" property in the session. Postman should show a 200 (success) response code and the text "OK".

    Click the blue "Cookies" text in Postman, and you should see that a cookie was set for "checkinapp" on localhost. Click on the "checkinapp" text to see the contents of the cookie:

    Local Cookies Cookie Detail

    Now flip back to your tab in Postman that has the checkin request for user 77. Send the request again, and this time it should succeed, as Postman will also send the cookie containing your session ID and the Checkin Receiver will then be able to validate you have an authenticated session. This time, you should get a 202 Accepted response:

    Authenticated Checkin 202 Response

    When the Checkin Receiver got your request, it used the value in your cookie to look up and load your session from Redis (connect-redis does this for us), then checked that the session had a "user" property set. Any system component that's running on a localhost URL can now access your session and query and store information in it.
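In code, that check can be as small as this sketch (illustrative only; see src/checkinreceiver.js for the course's actual implementation):

// A hypothetical Express middleware in the spirit of that check: reject
// the request unless the session has a user property set.
const requireAuthenticatedSession = (req, res, next) => {
  if (req.session && req.session.user) {
    return next(); // session found, carry on to the route handler
  }
  res.status(401).send('Unauthorized');
};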

    Finally, let's see what the session looks like in Redis... using redis-cli or the RedisInsight browser, find all the session keys like this:

    127.0.0.1:6379> keys ncc:session:*
    1) "ncc:session:Blvc-93k2TckafgwS0IDAHfW-MPGhqyl"

    Note that the key name contains the session ID that was in your cookie. connect-redis manages these keys for us as Redis Strings, all our application needs to do is manipulate the request's session object.

    Let's see what's in our session and when it expires:

    127.0.0.1:6379> get ncc:session:Blvc-93k2TckafgwS0IDAHfW-MPGhqyl
    "{\"cookie\":{\"originalMaxAge\":null,\"expires\":null,\"httpOnly\":true,\"path\":\"/\"},\"user\":\"al.appelhof@example.com\"}"
    127.0.0.1:6379> ttl ncc:session:Blvc-93k2TckafgwS0IDAHfW-MPGhqyl
    (integer) 85693

    We didn't specify a session length in our code when setting up connect-redis, so it's defaulted to expiring sessions after a day.

    Finally, let's logout from the Authentication Service and ensure that our session data gets removed from Redis. In Postman, create a GET request as follows and click Send:

    Logout Request

    The code in the logout handler destroys your session, which should then disappear from Redis:

    127.0.0.1:6379> get ncc:session:Blvc-93k2TckafgwS0IDAHfW-MPGhqyl
    (nil)

    Next, we'll move on to look at how to scale up our checkin processing to cope with all these new users!

    External Resources


    Welcome to the Node.js Crash Course


    Profile picture for Simon Prickett
    Author:
    Simon Prickett, Principal Developer Advocate at Redis

    Welcome to the Node.js Redis Crash Course. I'm Simon Prickett, Principal Developer Advocate at Redis.

    In this course, you'll learn about using Redis with Node.js through a blend of video and text-based training. You can also get hands-on with some optional workshop exercises where you'll add new functionality to an existing Node.js application.

    Learning Objectives

    By following the materials in this course, you can expect to learn about:

    • What Redis is and where to find it.
    • The benefits of adding Redis to your stack, when and how to use it.
    • How to leverage Redis in the context of a modern Node.js Express application with the ioredis client.
    • Using RedisInsight to browse and manage a Redis instance.
    • Modeling domain objects with Redis Hashes, querying them with Redis Search.
    • Storing and querying document data with Redis JSON.
    • Asynchronous messaging and fast data ingestion with Redis Streams.
    • Speeding up response times with Redis caching and Express middleware.
    • Scaling your application using Redis as a session store for Express.

    Software Prerequisites

    To get the most from this course, you'll need a machine that can run Node.js applications, plus Docker as we'll be using a container that includes Redis and the required add-on modules for this course.

    You'll need the following installed on your machine:

    • Docker (you'll need the docker-compose command)
    • Node.js (use the current Long Term Stable - LTS - version)
    • git command line tools
    • Your favorite IDE (we like VSCode, but anything you're comfortable with works)
    • Postman - we're going to make some API calls and Postman makes that easy.

    Let's Learn Together

    Sign up for the Redis Discord where you can ask me anything about the course!


    What is Redis?: An Overview


    Profile picture for Simon Prickett
    Author:
    Simon Prickett, Principal Developer Advocate at Redis

Redis is an open source data structure server. It belongs to the class of NoSQL databases known as key/value stores. Keys are unique identifiers whose value can be one of the data types that Redis supports. These data types range from simple Strings to Linked Lists, Sets, and even Streams. Each data type has its own set of behaviours and commands associated with it.

    For example, I can store my name in a Redis String and associate it with the key "myname" using a Redis SET command. I can then retrieve the value using a Redis GET command. Here's how that looks using redis-cli, a command line interface to Redis:

    127.0.0.1:6379> set myname "Simon Prickett"
    OK
    127.0.0.1:6379> get myname
    "Simon Prickett"

    Keys in a Redis database are distributed in a flat keyspace. Redis does not enforce a schema or naming policy for keys. This provides great flexibility, with the organization of the keyspace being the responsibility of the developer. We'll look at ways of managing this later in the course.
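For reference, the sample data used later in this course follows a common convention: key names built from colon-delimited parts that describe what the key holds, for example:

ncc:users:99        # a Hash holding data for user 99
ncc:locations:138   # a Hash holding data for location 138
ncc:checkins        # a Stream of checkin events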

    Redis is famous for being an extremely fast database. This speed comes from the fact that it stores and serves all data from memory rather than disk. Redis is durable, so your data will be persisted, but all reads will be from a copy of the data held in memory. This makes Redis an excellent choice for applications that require real time data access.

    Redis is also often used as a cache, and has specific functionality to support this. Redis can be extended with new functionality using plugin modules. We'll see how to use some of these as we make our way through the course.

    External Resources

Here are some resources that we think will be useful to you as you discover Redis:

    • redis.io - the official website of open source Redis.
    • Redis Enterprise Cloud - a fully managed cloud service from Redis - you can try it out using the full featured free tier.
    • The official Redis Docker image.
    • For a comprehensive introduction to Redis, we recommend taking a look at the RU101, Introduction to Redis Data Structures course at Redis University. In this free online course, you’ll learn about the data structures in Redis, and you’ll see how to practically apply them in the real world.

    Up and Running with Express and Redis OM for Node.js in 5-minutes


    Profile picture for Guy Royse
    Author:
    Guy Royse, Senior Developer Advocate at Redis

    OK. So that title is a bold claim. And this is a read-and-follow-along sort of tutorial. So, it might be 6 minutes or 4 minutes depending on how fast you type. Regardless, this should get you building something useful quickly and could make a nice foundation for something bigger.

    Oh, and you might be wondering what Redis OM is. Well, there's an extensive README on GitHub. Go check it out!

    Also, this document, and the code that we're about to implement, and the data needed to test it are all out on GitHub. Refer to them as you need.

    Let's Build Something

    So, what are we going to build? We're going to build a RESTful service that lets you manage songs. It'll let you do all the CRUD things (that's create, read, update, and delete for the uninitiated) with songs. Plus, we'll add some cool search endpoints to the service as well. That way, we can find songs by an artist or genre, from a particular year, or with certain lyrics.

    Test data for this problem was a little tricky. Most song lyrics are copyrighted and getting permission to use them for a little old tutorial wasn't really an option. And we definitely want to be able to search on song lyrics. How else are we going to find that song that goes "oooo ah ah ah ah"?

    Fortunately, my buddy Dylan Beattie is literally the original Rockstar developer. In addition to coding cool things, he writes parody songs with tech themes. And, he has given me permission to use them as test data.

    Humble Beginnings

    We're using Redis as our database—that's the whole idea behind Redis OM. So, you'll need some Redis, specifically with Search and JSON installed. The easiest way to do this is to set up a free Redis Cloud instance. But, you can also use Docker:

    $ docker run -p 6379:6379 redis/redis-stack:latest

    I'm assuming you are relatively Node.js savvy so you should be able to get that installed on your own. We'll be using the top-level await feature for modules that was introduced in Node v14.8.0 so do make sure you have that version, or a newer one. If you don't, go and get it.

    Once you have that, it's time to create a project:

    $ npm init

    Give it a name, version, and description. Use whatever you like. I called mine "Metalpedia".

    Install Express and Redis OM for Node.js:

    $ npm install express redis-om --save

    And, just to make our lives easy, we'll use nodemon:

    $ npm install nodemon --save-dev

    Now that stuff is installed, let's set up some other details in our package.json. First, set the "type" to "module", so we can use ES6 Modules:

      "type": "module",

    The "test" script that npm init generates isn't super useful for us. Replace that with a "start" script that calls nodemon. This will allow the service we build to restart automatically whenever we change a file. Very convenient:

      "scripts": {
    "start": "nodemon server.js"
    },

    I like to make my packages private, so they don't accidentally get pushed to NPM:

      "private": true,

    Oh, and you don't need the "main" entry. We're not building a package to share. So go ahead and remove that.

    Now, you should have a package.json that looks something like this:

{
  "name": "metalpedia",
  "version": "1.0.0",
  "description": "Sample application for building a music repository backed by Redis and Redis OM.",
  "type": "module",
  "scripts": {
    "start": "nodemon server.js"
  },
  "author": "Guy Royse <guy@guyroyse.com> (http://guyroyse.com/)",
  "license": "MIT",
  "private": true,
  "dependencies": {
    "express": "^4.17.1",
    "redis-om": "^0.2.0"
  },
  "devDependencies": {
    "nodemon": "^2.0.14"
  }
}

    Excellent. Set up done. Let's write some code!

    Getting the Express Service Up and Running

    I like to write my services with a little version and name endpoint at the root. That way if some random developer hits the site of the service, they'll get a clue as to what it is. So let's do that:

    Create a file named server.js in the root of your project folder and populate it thus:

    import express from 'express';

    // create an express app and use JSON
    let app = new express();
    app.use(express.json());

    // setup the root level GET to return name and version from package.json
app.get('/', (req, res) => {
  res.send({
    name: process.env.npm_package_name,
    version: process.env.npm_package_version,
  });
});

    // start listening
    app.listen(8080);

    We now have enough to actually run something. So let's run it:

    $ npm start

    Then, hit http://localhost:8080/ in your favorite browser. You should see something like this:

{
  "name": "metalpedia",
  "version": "1.0.0"
}

    Or, hit your service using curl (and json_pp if you want to be fancy):

    $ curl -X GET http://localhost:8080 -s | json_pp
{
  "name": "metalpedia",
  "version": "1.0.0"
}

    Cool. Let's add some Redis.

    Mapping Songs to Redis

    We're going to use Redis OM to map data for a song from JSON data in Redis to JavaScript objects.

    Create a file named song-repository.js in the root of your project folder. In it, import all the parts from Redis OM that you'll need:

    import { Entity, Schema, Client, Repository } from 'redis-om';

    Entities are the classes that you work with—the thing being mapped to. They are what you create, read, update, and delete. Any class that extends Entity is an entity. We'll define our Song entity with a single line for now, but we'll add some more to it later:

    class Song extends Entity {}

    Schemas define the fields on your entity, their types, and how they are mapped internally to Redis. By default, entities map to Hashes in Redis but we want ours to use JSON instead. When a Schema is created, it will add properties to the provided entity class based on the schema information provided. Here's a Schema that maps to our Song:

let schema = new Schema(Song, {
  title: { type: 'string' }, // the title of the song
  artist: { type: 'string' }, // who performed the song
  genres: { type: 'string[]' }, // array of strings for the genres of the song
  lyrics: { type: 'text' }, // the full lyrics of the song
  music: { type: 'text' }, // who wrote the music for the song
  year: { type: 'number' }, // the year the song was released
  duration: { type: 'number' }, // the duration of the song in seconds
  link: { type: 'string' }, // link to a YouTube video of the song
});

Clients are used to connect to Redis. Create a Client and pass your Redis URL to its .open method. If you don't specify a URL, it will default to redis://localhost:6379. Clients have methods to .open, .close, and .execute raw Redis commands, but here we're just going to open it:

    let client = await new Client().open();

    Remember that top-level await stuff I mentioned at the top of the document? There it is!

    Now we have all the pieces that we need to create a Repository. Repositories are the main interface into Redis OM. They give us the methods to read, write, and remove entities. Create a repository—and make sure it's exported as you'll need it when we get into the Express stuff:

    export let songRepository = client.fetchRepository(schema);

    We're almost done with setting up our repository. But we still need to create an index or we won't be able to search on anything. We do that by calling .createIndex. If an index already exists and it's the same, this function won't do anything. If it is different, it'll drop it and create a new one. In a real environment, you'd probably want to create your index as part of CI/CD. But we'll just cram them into our main code for this example:

    await songRepository.createIndex();

    We have what we need to talk to Redis. Now, let's use it to make some routes in Express.

    Using Redis OM for CRUD Operations

    Let's create a truly RESTful API with the CRUD operations mapping to PUT, GET, POST, and DELETE respectively. We're going to do this using Express Routers as this makes our code nice and tidy. So, create a file called song-router.js in the root of your project folder. Then add the imports and create a Router:

    import { Router } from 'express';
    import { songRepository as repository } from './song-repository.js';

    export let router = Router();

This router needs to be added in server.js under the /song path so let's do that next. Add the following line of code at the top of server.js—with all the other imports—to import the song router:

    import { router as songRouter } from './song-router.js';

    Also add a line of code to call .use so that the router we are about to implement is, well, used:

    app.use('/song', songRouter);

    Our server.js should now look like this:

    import express from 'express';
    import { router as songRouter } from './song-router.js';

    // create an express app and use JSON
    let app = new express();
    app.use(express.json());

    // bring in some routers
    app.use('/song', songRouter);

    // setup the root level GET to return name and version from package.json
app.get('/', (req, res) => {
  res.send({
    name: process.env.npm_package_name,
    version: process.env.npm_package_version,
  });
});

    // start listening
    app.listen(8080);

    Add a Create Route

    Now, let's start putting some routes in our song-router.js. We'll create a song first as you need to have songs in Redis before you can do any of the reading, updating, or deleting of them. Add the PUT route below. This route will call .createEntity to create an entity, set all the properties on the newly created entity, and then call .save to persist it:

router.put('/', async (req, res) => {
  // create the Song so we can save it
  let song = repository.createEntity();

  // set all the properties, converting missing properties to null
  song.title = req.body.title ?? null;
  song.artist = req.body.artist ?? null;
  song.genres = req.body.genres ?? null;
  song.lyrics = req.body.lyrics ?? null;
  song.music = req.body.music ?? null;
  song.year = req.body.year ?? null;
  song.duration = req.body.duration ?? null;
  song.link = req.body.link ?? null;

  // save the Song to Redis
  let id = await repository.save(song);

  // return the id of the newly created Song
  res.send({ id });
});

    Now that we have a way to shove songs into Redis, let's start shoving. Out on GitHub, there are a bunch of JSON files with song data in them. (Thanks Dylan!) Go ahead and pull those down and place them in a folder under your project root called songs.

    Let's use curl to load in a song. I'm partial to HTML, sung to the tune of AC/DC's Highway to Hell, so let's use that one:

    $ curl -X PUT -H "Content-Type: application/json" -d "@songs/html.json" http://localhost:8080/song -s | json_pp

    You should get back the ID of that newly inserted song:

{
  "id": "01FKRW9WMVXTGF71NBEM3EBRPY"
}

    We're shipping HTML indeed. If you have the redis-cli handy—or want to use RedisInsight—you can take a look and see how Redis has stored this:

    > json.get Song:01FKRW9WMVXTGF71NBEM3EBRPY
    "{\"title\":\"HTML\",\"artist\":\"Dylan Beattie and the Linebreakers\",\"genres\":[\"blues rock\",\"hard rock\",\"parody\",\"rock\"],\"lyrics\":\"W3C, RFC, a JIRA ticket and a style guide.\\\\nI deploy with FTP, run it all on the client side\\\\nDon\xe2\x80\x99t need Ruby, don\xe2\x80\x99t need Rails,\\\\nAin\xe2\x80\x99t nothing running on my stack\\\\nI\xe2\x80\x99m hard wired, for web scale,\\\\nYeah, I\xe2\x80\x99m gonna bring the 90s back\\\\n\\\\nI\xe2\x80\x99m shipping HTML,\\\\nHTML,\\\\nI\xe2\x80\x99m shipping HTML,\\\\nHTML\xe2\x80\xa6\\\\n\\\\nNo logins, no trackers,\\\\nNo cookie banners to ignore\\\\nI ain\xe2\x80\x99t afraid of, no hackers\\\\nJust the occasional 404\\\\nThey hatin\xe2\x80\x99, what I do,\\\\nBut that\xe2\x80\x99s \xe2\x80\x98cos they don\xe2\x80\x99t understand\\\\nMark it up, break it down,\\\\nRemember to escape your ampersands\xe2\x80\xa6\\\\n\\\\nI\xe2\x80\x99m shipping HTML,\\\\nHTML,\\\\nI\xe2\x80\x99m shipping HTML,\\\\nHTML\xe2\x80\xa6\\\\n\\\\n(But it\xe2\x80\x99s really just markdown.)\",\"music\":\"\\\"Highway to Hell\\\" by AC/DC\",\"year\":2020,\"duration\":220,\"link\":\"https://www.youtube.com/watch?v=woKUEIJkwxI\"}"

    Yep. Looks like JSON.

    Add a Read Route

    Create down, let's add a GET route to read this song from HTTP instead of using the redis-cli:

router.get('/:id', async (req, res) => {
  // fetch the Song
  let song = await repository.fetch(req.params.id);

  // return the Song we just fetched
  res.send(song);
});

    Now you can use curl or your browser to load http://localhost:8080/song/01FKRW9WMVXTGF71NBEM3EBRPY to fetch the song:

    $ curl -X GET http://localhost:8080/song/01FKRW9WMVXTGF71NBEM3EBRPY -s | json_pp

    And you should get back the JSON for the song:

{
  "link": "https://www.youtube.com/watch?v=woKUEIJkwxI",
  "genres": ["blues rock", "hard rock", "parody", "rock"],
  "entityId": "01FKRW9WMVXTGF71NBEM3EBRPY",
  "title": "HTML",
  "lyrics": "W3C, RFC, a JIRA ticket and a style guide.\\nI deploy with FTP, run it all on the client side\\nDon’t need Ruby, don’t need Rails,\\nAin’t nothing running on my stack\\nI’m hard wired, for web scale,\\nYeah, I’m gonna bring the 90s back\\n\\nI’m shipping HTML,\\nHTML,\\nI’m shipping HTML,\\nHTML…\\n\\nNo logins, no trackers,\\nNo cookie banners to ignore\\nI ain’t afraid of, no hackers\\nJust the occasional 404\\nThey hatin’, what I do,\\nBut that’s ‘cos they don’t understand\\nMark it up, break it down,\\nRemember to escape your ampersands…\\n\\nI’m shipping HTML,\\nHTML,\\nI’m shipping HTML,\\nHTML…\\n\\n(But it’s really just markdown.)",
  "duration": 220,
  "artist": "Dylan Beattie and the Linebreakers",
  "music": "\"Highway to Hell\" by AC/DC",
  "year": 2020
}

    Now that we can read and write, let's implement the REST of the HTTP verbs. REST... get it?

    Add an Update Route

Here's the code to update using a POST route. You'll note this code is nearly identical to the PUT route. Feel free to refactor to a helper function but since this is just a tutorial, I'll skip that for now:

router.post('/:id', async (req, res) => {
  // fetch the Song we are replacing
  let song = await repository.fetch(req.params.id);

  // set all the properties, converting missing properties to null
  song.title = req.body.title ?? null;
  song.artist = req.body.artist ?? null;
  song.genres = req.body.genres ?? null;
  song.lyrics = req.body.lyrics ?? null;
  song.music = req.body.music ?? null;
  song.year = req.body.year ?? null;
  song.duration = req.body.duration ?? null;
  song.link = req.body.link ?? null;

  // save the Song to Redis
  let id = await repository.save(song);

  // return the id of the Song we just saved
  res.send({ id });
});

    And the curl command to try it out, replacing Dylan's HTML with D.M.C.A.—sung to the tune of Y.M.C.A. by the Village People:

    $ curl -X POST -H "Content-Type: application/json" -d "@songs/d-m-c-a.json" http://localhost:8080/song/01FKRW9WMVXTGF71NBEM3EBRPY -s | json_pp

    You should get back the ID of that updated song:

{
  "id": "01FKRW9WMVXTGF71NBEM3EBRPY"
}

    Add a Delete Route

    And, finally, let's implement a DELETE route:

router.delete('/:id', async (req, res) => {
  // delete the Song with its id
  await repository.remove(req.params.id);

  // respond with OK
  res.type('application/json');
  res.send('OK');
});

    And test it out:

    $ curl -X DELETE http://localhost:8080/song/01FKRW9WMVXTGF71NBEM3EBRPY -s
    OK

    This just returns "OK", which is technically JSON but aside from the response header, is indistinguishable from plain text.

    Searching with Redis OM

    All the CRUD is done. Let's add some search. Search is where Redis OM really starts to shine. We're going to create routes to:

    • Return all the songs, like, all of them.
    • Fetch songs for a particular artist, like "Dylan Beattie and the Linebreakers".
    • Fetch songs that are in a certain genre, like "rock" or "electronic".
    • Fetch songs between years, like all the songs from the 80s.
    • Fetch songs that have certain words in their lyrics, like "html" or "markdown".

    Load Songs into Redis

    Before we get started, let's load up Redis with a bunch of songs—so we have stuff to search for. I've written a short shell script that loads all the song data on GitHub into Redis using the server we just made. It just calls curl in a loop. It's on GitHub, so go grab it and put it in your project root. Then run it:

    $ ./load-data.sh

    You should get something like:

    {"id":"01FM310A8AVVM643X13WGFQ2AR"} <- songs/big-rewrite.json
    {"id":"01FM310A8Q07D6S7R3TNJB146W"} <- songs/bug-in-the-javascript.json
    {"id":"01FM310A918W0JCQZ8E57JQJ07"} <- songs/d-m-c-a.json
    {"id":"01FM310A9CMJGQHMHY01AP0SG4"} <- songs/enterprise-waterfall.json
    {"id":"01FM310A9PA6DK4P4YR275M58X"} <- songs/flatscreens.json
    {"id":"01FM310AA2XTEQV2NZE3V7K3M7"} <- songs/html.json
    {"id":"01FM310AADVHEZXF7769W6PQZW"} <- songs/lost-it-on-the-blockchain.json
    {"id":"01FM310AASNA81Y9ACFMCGP05P"} <- songs/meetup-2020.json
    {"id":"01FM310AB4M2FKTDPGEEMM3VTV"} <- songs/re-bass.json
    {"id":"01FM310ABFGFYYJXVABX2YXGM3"} <- songs/teams.json
    {"id":"01FM310ABW0ANYSKN9Q1XEP8BJ"} <- songs/tech-sales.json
    {"id":"01FM310AC6H4NRCGDVFMKNGKK3"} <- songs/these-are-my-own-devices.json
    {"id":"01FM310ACH44414RMRHPCVR1G8"} <- songs/were-gonna-build-a-framework.json
    {"id":"01FM310ACV8C72Y69VDQHA12C1"} <- songs/you-give-rest-a-bad-name.json

    Note that this script will not erase any data. So any songs that you have in there already will still be there, alongside these. And if you run this script more than once, it will gleefully add the songs a second time.

    Adding a Songs Router

    Like with the CRUD operations for songs, we need to first create a router. This time we'll name the file songs-router.js. Note the plural. Add all the imports and exports to that file like before:

    import { Router } from 'express';
    import { songRepository as repository } from './song-repository.js';

    export let router = Router();

    Add this router to Express in server.js under /songs, also like we did before. And, again, note the plural. Your server.js should now look like this:

    import express from 'express';
    import { router as songRouter } from './song-router.js';
    import { router as songsRouter } from './songs-router.js';

    // create an express app and use JSON
    let app = new express();
    app.use(express.json());

    // bring in some routers
    app.use('/song', songRouter);
    app.use('/songs', songsRouter);

    // setup the root level GET to return name and version from package.json
app.get('/', (req, res) => {
  res.send({
    name: process.env.npm_package_name,
    version: process.env.npm_package_version,
  });
});

    // start listening
    app.listen(8080);

    Add Some Search Routes

    Now we can add some search routes. We initiate a search by calling .search on our repository. Then we call .where to add any filters we want—if we want any at all. Once we've specified the filters, we call .returnAll to get all the matching entities.

    Here's the simplest search—it just returns everything. Go ahead and add it to songs-router.js:

router.get('/', async (req, res) => {
  let songs = await repository.search().returnAll();
  res.send(songs);
});

    Then try it out with curl or your browser:

    $ curl -X GET http://localhost:8080/songs -s | json_pp

    We can search for a specific field by calling .where and .eq. This route finds all songs by a particular artist. Note that you must specify the complete name of the artist for this to work:

router.get('/by-artist/:artist', async (req, res) => {
  let artist = req.params.artist;
  let songs = await repository.search().where('artist').eq(artist).returnAll();
  res.send(songs);
});

    Then try it out with curl or your browser too:

$ curl -X GET http://localhost:8080/songs/by-artist/Dylan%20Beattie%20and%20the%20Linebreakers -s | json_pp

    Genres are stored as an array of strings. You can use .contains to see if the array contains that genre or not:

router.get('/by-genre/:genre', async (req, res) => {
  let genre = req.params.genre;
  let songs = await repository
    .search()
    .where('genres')
    .contains(genre)
    .returnAll();
  res.send(songs);
});

    And try it out:

    $ curl -X GET http://localhost:8080/songs/by-genre/rock -s | json_pp
    $ curl -X GET http://localhost:8080/songs/by-genre/parody -s | json_pp

    This route lets you get all the songs between two years. Great for finding all those 80s hits. Of course, all of Dylan's songs are more recent than that, so we'll go a little more narrow when we try it out:

router.get('/between-years/:start-:stop', async (req, res) => {
  let start = Number.parseInt(req.params.start);
  let stop = Number.parseInt(req.params.stop);
  let songs = await repository
    .search()
    .where('year')
    .between(start, stop)
    .returnAll();
  res.send(songs);
});

    And, try it out, of course:

    $ curl -X GET http://localhost:8080/songs/between-years/2020-2021 -s | json_pp

    Let's add the final route to find songs that have certain words in the lyrics using .match:

router.get('/with-lyrics/:lyrics', async (req, res) => {
  let lyrics = req.params.lyrics;
  let songs = await repository
    .search()
    .where('lyrics')
    .match(lyrics)
    .returnAll();
  res.send(songs);
});

    We can try this out too, getting all the songs that contain both the words "html" and "markdown":

    $ curl -X GET http://localhost:8080/songs/with-lyrics/html%20markdown -s | json_pp

    Wrapping Up

    And that's a wrap. I've walked you through some of the basics with this tutorial. But you should totally go deeper. If you want to learn more, go ahead and check out Redis OM for Node.js on GitHub. It explains the capabilities of Redis OM for Node.js in greater detail.

    If you have any questions or are stuck, feel free to jump on the Redis Discord server and ask there. I'm always hanging out and happy to help.

    And, if you find a flaw, bug, or just think this tutorial could be improved, send a pull request or open an issue.

    Thanks!


    PHPRedis - Redis client library for PHP


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    Find tutorials, examples and technical articles that will help you to develop with Redis and PHP.

    Getting Started

    In order to use Redis with PHP you will need a PHP Redis client. In the following sections, we will demonstrate the use of PhpRedis, a flexible and feature-complete Redis client library for PHP. Additional PHP clients for Redis can be found under the PHP section of the Redis Clients page.

    Redis is an open source, in-memory, key-value data store most commonly used as a primary database, cache, message broker, and queue. Redis cache delivers sub-millisecond response times, enabling fast and powerful real-time applications in industries such as gaming, fintech, ad-tech, social media, healthcare, and IoT.

    Step 1. Run a Redis server

You can either run the Redis server in a Docker container or directly on your machine. Use the commands below to set up a Redis server locally on macOS:

brew tap redis-stack/redis-stack
brew install --cask redis-stack
    INFO

    Redis Stack unifies and simplifies the developer experience of the leading Redis modules and the capabilities they provide. Learn more

    Ensure that you are able to use the following Redis command to connect to the Redis instance.

redis-cli -h localhost -p 6379
localhost>

Now you should be able to perform CRUD operations with Redis keys. The above Redis client command might require a password if you have set up authentication in your Redis configuration file; if a Redis password is not set, it will connect to the Redis server with its default settings. You can play around inserting data into Redis using the SET command and then fetching it back with the GET command, as shown below.
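For example (the key name and value here are arbitrary):

localhost> set greeting "Hello from Redis"
OK
localhost> get greeting
"Hello from Redis"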

    Step 2. Get pecl

    apt install pkg-php-tools

    Step 3. Install PhpRedis

    pecl install redis

    Step 4. Opening a Connection to Redis Using PhpRedis

    The following code creates a connection to Redis using PhpRedis

    <?php

    $redis = new Redis();
    //Connecting to Redis
    $redis->connect('hostname', port);
    $redis->auth('password');

if ($redis->ping()) {
    echo "PONG";
}

    ?>

Replace the hostname, port, and password values with those of your database, then save this file as connect.php.

    Step 5. Executing the script

    php connect.php

It should display "PONG" as output. You can verify this by running the monitor command:

    127.0.0.1:6379> monitor
    OK
    1614778301.165001 [0 [::1]:57666] "PING"

    Further Reference:

    Redis Launchpad

    If you aren't familiar with asyncio, take a few minutes to watch this primer on asyncio before continuing:

    Installing the Redis Client

    We're going to start this tutorial assuming that you have a FastAPI project to work with. We'll use the IsBitcoinLit project for our examples.

    Poetry is the best way to manage Python dependencies today, so we'll use it in this tutorial.

IsBitcoinLit includes a pyproject.toml file that Poetry uses to manage the project's dependencies, but if you had not already created one, you could do so like this:

$ poetry init

    Once you have a pyproject.toml file, and assuming you already added FastAPI and any other necessary dependencies, you could add aioredis-py to your project like this:

$ poetry add aioredis@2.0.0
    note

    This tutorial uses aioredis-py 2.0. The 2.0 version of aioredis-py features an API that matches the most popular synchronous Redis client for Python, redis-py.

    The aioredis-py client is now installed. Time to write some code!

    Integrate aioredis-py with FastAPI

    We're going to use Redis for a few things in this FastAPI app:

    1. Storing 30-second averages of sentiment and price for the last 24 hours with Redis Time Series
    2. Rolling up these averages into a three-hour snapshot with Redis Time Series
    3. Caching the three-hour snapshot

    Let's look at each of these integration points in more detail.

    Creating the time series

    The data for our app consists of 30-second averages of Bitcoin prices and sentiment ratings for the last 24 hours. We pull these from the SentiCrypt API.

    note

    We have no affiliation with SentiCrypt or any idea how accurate these numbers are. This example is just for fun!

    We're going to store price and sentiment averages in a time series with Redis Stack, so we want to make sure that when the app starts up, the time series exists.

    We can use a startup event to accomplish this. Doing so looks like the following:

@app.on_event('startup')
async def startup_event():
    keys = Keys()
    await initialize_redis(keys)

    We'll use the TS.CREATE command to create the time series within our initialize_redis() function:

async def make_timeseries(key):
    """
    Create a timeseries with the Redis key `key`.

    We'll use the duplicate policy known as "first," which ignores
    duplicate pairs of timestamp and values if we add them.

    Because of this, we don't worry about handling this logic
    ourselves -- but note that there is a performance cost to writes
    using this policy.
    """
    try:
        await redis.execute_command(
            'TS.CREATE', key,
            'DUPLICATE_POLICY', 'first',
        )
    except ResponseError as e:
        # Time series probably already exists
        log.info('Could not create time series %s, error: %s', key, e)

    tip

    When you create a time series, use the DUPLICATE_POLICY option to specify how to handle duplicate pairs of timestamp and values.

    Storing Sentiment and Price Data in Redis

    A /refresh endpoint exists in the app to allow a client to trigger a refresh of the 30-second averages. This is the entire function:

@app.post('/refresh')
async def refresh(background_tasks: BackgroundTasks, keys: Keys = Depends(make_keys)):
    async with httpx.AsyncClient() as client:
        data = await client.get(SENTIMENT_API_URL)
        await persist(keys, data.json())
        data = await calculate_three_hours_of_data(keys)
        background_tasks.add_task(set_cache, data, keys)

    As is often the case with Python, a lot happens in a few lines, so let's walk through them.

    The first thing we do is get the latest sentiment and price data from SentiCrypt. The response data looks like this:

[
  {
    "count": 7259,
    "timestamp": 1625592626.3452034,
    "rate": 0.0,
    "last": 0.33,
    "sum": 1425.82,
    "mean": 0.2,
    "median": 0.23,
    "btc_price": "33885.23"
  }
  //... Many more entries
]

    Then we save the data into two time series in Redis with the persist() function. That ends up calling another helper, add_many_to_timeseries(), like this:

await add_many_to_timeseries(
    (
        (ts_price_key, 'btc_price'),
        (ts_sentiment_key, 'mean'),
    ), data,
)

The add_many_to_timeseries() function takes a list of (time series key, sample key) pairs and a list of samples from SentiCrypt. For each sample, it reads the value of the sample key in the SentiCrypt sample, like "btc_price," and adds that value to the given time series key.

    Here's the function:

async def add_many_to_timeseries(
    key_pairs: Iterable[Tuple[str, str]],
    data: BitcoinSentiments
):
    """
    Add many samples to a single timeseries key.

    `key_pairs` is an iterable of tuples containing in the 0th position the
    time series key into which to insert entries and in the 1st position the
    name of the key within the `data` dict to find the sample.
    """
    partial = functools.partial(redis.execute_command, 'TS.MADD')
    for datapoint in data:
        for timeseries_key, sample_key in key_pairs:
            partial = functools.partial(
                partial, timeseries_key, int(
                    float(datapoint['timestamp']) * 1000,
                ),
                datapoint[sample_key],
            )
    return await partial()

    This code is dense, so let's break it down.

    We're using the TS.MADD command to add many samples to a time series. We use TS.MADD because doing so is faster than TS.ADD for adding batches of samples to a time series.

This results in a single large TS.MADD call that adds price data to the price time series and sentiment data to the sentiment time series. Conveniently, TS.MADD can add samples to multiple time series in a single call.
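For intuition, here is a rough sketch of the single command those stacked partials build up to -- the key names, timestamps, and values are illustrative, not the app's actual data:

# Illustrative equivalent of the accumulated partial() call above.
# TS.MADD takes repeating (key, timestamp-in-ms, value) triples.
await redis.execute_command(
    'TS.MADD',
    'btc:price', 1625592626345, '33885.23',
    'btc:sentiment', 1625592626345, '0.2',
    'btc:price', 1625592656345, '33890.10',
    'btc:sentiment', 1625592656345, '0.21',
)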

    Calculating Three-Hour Averages with Redis

    Clients use IsBitcoinLit to get the average price and sentiment for each of the last three hours. But so far, we've only stored 30-second averages in Redis. How do we calculate the average of these averages for the last three hours?

    When we run /refresh, we call calculate_three_hours_of_data() to do so. The function looks like this:

async def calculate_three_hours_of_data(keys: Keys) -> Dict[str, str]:
    sentiment_key = keys.timeseries_sentiment_key()
    price_key = keys.timeseries_price_key()
    three_hours_ago_ms = int((now() - timedelta(hours=3)).timestamp() * 1000)

    sentiment = await get_hourly_average(sentiment_key, three_hours_ago_ms)
    price = await get_hourly_average(price_key, three_hours_ago_ms)

    last_three_hours = [{
        'price': data[0][1], 'sentiment': data[1][1],
        'time': datetime.fromtimestamp(data[0][0] / 1000, tz=timezone.utc),
    } for data in zip(price, sentiment)]

    return {
        'hourly_average_of_averages': last_three_hours,
        'sentiment_direction': get_direction(last_three_hours, 'sentiment'),
        'price_direction': get_direction(last_three_hours, 'price'),
    }

    There is more going on here than we need to know for this tutorial. As a summary, most of this code exists to support calls to get_hourly_average().

    That function is where the core logic exists to calculate averages for the last three hours, so let's see what it contains:

async def get_hourly_average(ts_key: str, top_of_the_hour: int):
    response = await redis.execute_command(
        'TS.RANGE', ts_key, top_of_the_hour, '+',
        'AGGREGATION', 'avg', HOURLY_BUCKET,
    )
    # The response is a list of the structure [timestamp, average].
    return response

Here, we use the TS.RANGE command to get the samples in the time series from the "top" of the hour three hours ago until the latest sample in the series. With the AGGREGATION parameter, we get back the averages of the samples in hourly buckets.
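Concretely, assuming HOURLY_BUCKET is 3600000 milliseconds and using an illustrative key name, the command and reply look roughly like this:

TS.RANGE btc:sentiment 1625580000000 + AGGREGATION avg 3600000
1) 1) (integer) 1625580000000
   2) "0.23"
2) 1) (integer) 1625583600000
   2) "0.25"
3) 1) (integer) 1625587200000
   2) "0.24"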

    So where does this leave us? With averages of the averages, one for each of the last three hours.

    Caching Data with Redis

    Let's review. We have code that achieves the following:

    1. Gets the latest sentiment and price data from SentiCrypt.
    2. Saves the data into two time series in Redis.
    3. Calculates the average of the averages for the last three hours.

    The snapshot of averages for the last three hours is the data we want to serve clients when they hit the /is-bitcoin-lit endpoint. We could run this calculation every time a client requests data, but that would be inefficient. Let's cache it in Redis!

    First, we'll look at writing to the cache. Then we'll see how FastAPI reads from the cache.

    Writing Cache Data to Redis

    Take a closer look at the last line of the refresh() function:

        background_tasks.add_task(set_cache, data, keys)

    In FastAPI, you can run code outside of a web request after returning a response. This feature is called background tasks.

    This is not as robust as using a background task library like Celery. Instead, Background Tasks are a simple way to run code outside of a web request, which is a great fit for things like updating a cache.

    When you call add_task(), you pass in a function and a list of arguments. Here, we pass in set_cache(). This function saves the three-hour averages summary to Redis. Let's look at how it works:

async def set_cache(data, keys: Keys):
    def serialize_dates(v):
        return v.isoformat() if isinstance(v, datetime) else v

    await redis.set(
        keys.cache_key(),
        json.dumps(data, default=serialize_dates),
        ex=TWO_MINUTES,
    )

    First, we serialize the three-hour summary data to JSON and save it to Redis. We use the ex parameter to set the expiration time for the data to two minutes.

    TIP: You need to provide a default serializer for the json.dumps() function so that dumps() knows how to serialize datetime objects.

    This means that after every refresh, we've primed the cache. The cache isn't primed for long -- only two minutes -- but it's something!

Reading Cache Data from Redis

    We haven't even seen the API endpoint that clients will use yet! Here it is:

@app.get('/is-bitcoin-lit')
async def bitcoin(background_tasks: BackgroundTasks, keys: Keys = Depends(make_keys)):
    data = await get_cache(keys)

    if not data:
        data = await calculate_three_hours_of_data(keys)
        background_tasks.add_task(set_cache, data, keys)

    return data

    To use this endpoint, clients make a GET request to /is-bitcoin-lit. Then we try to get the cached three-hour summary from Redis. If we can't, we calculate the three-hour summary, return it, and then save it outside of the web request.

    We've already seen how calculating the summary data works, and we just explored saving the summary data to Redis. So, let's look at the get_cache() function, where we read the cached data:

def datetime_parser(dct):
    for k, v in dct.items():
        if isinstance(v, str) and v.endswith('+00:00'):
            try:
                dct[k] = datetime.datetime.fromisoformat(v)
            except:
                pass
    return dct


async def get_cache(keys: Keys):
    current_hour_cache_key = keys.cache_key()
    current_hour_stats = await redis.get(current_hour_cache_key)

    if current_hour_stats:
        return json.loads(current_hour_stats, object_hook=datetime_parser)

    Remember that when we serialized the summary data to JSON, we needed to provide a default serializer for json.dumps() that understood datetime objects. Now that we're deserializing that data, we need to give json.loads() an "object hook" that understands datetime strings. That's what datetime_parser() does.
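Here's a quick, self-contained round trip using the datetime_parser() defined above (the sample dict is illustrative):

import json
import datetime

doc = {'time': datetime.datetime(2021, 7, 6, tzinfo=datetime.timezone.utc)}
s = json.dumps(doc, default=lambda v: v.isoformat())
print(s)  # {"time": "2021-07-06T00:00:00+00:00"}
print(json.loads(s, object_hook=datetime_parser))
# {'time': datetime.datetime(2021, 7, 6, 0, 0, tzinfo=datetime.timezone.utc)}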

    Other than parsing dates, this code is relatively straightforward. We get the current hour's cache key, and then we try to get the cached data from Redis. If we can't, we return None.

    Summary

    Putting all the pieces together, we now have a FastAPI app that can retrieve Bitcoin price and sentiment averages, store the averages in Redis, cache three-hour summary data in Redis, and serve the data to clients. Not too shabby!

    Here are a few notes to consider:

    1. We manually controlled caching in this tutorial, but you can also use a library like aiocache to cache data in Redis.
    2. We ran Redis commands like TS.MADD using the execute_command() method in aioredis-py. If you are instead using redis-py in a synchronous project, you can use the same commands.
Python with Redis

    Ensure that you are able to use the following Redis command to connect to the Redis instance.

     redis-cli
    127.0.0.1:6379>

    Now, you should be able to get Redis data by using Redis commands.

Step 2. Install the Redis client library using pip

Run the following command to install the Redis client library:

 pip3 install redis

Step 3. Write your application code

The following Python code allows you to connect to the default Redis server instance:

import redis

# Create a connection pool and a client that uses it
pool = redis.ConnectionPool(host='localhost', port=6379, db=0)
redis = redis.Redis(connection_pool=pool)

# Set and read back a simple string key
redis.set('mykey', 'Hello from Python!')
value = redis.get('mykey')
print(value)

# Add two members to a sorted set, then fetch the whole set
redis.zadd('vehicles', {'car': 0})
redis.zadd('vehicles', {'bike': 0})
vehicles = redis.zrange('vehicles', 0, -1)
print(vehicles)
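One optional variation: redis-py returns bytes by default, so print(value) above shows b'Hello from Python!'. If you'd rather get str values back, pass decode_responses=True when creating the client. A minimal sketch:

import redis

r = redis.Redis(host='localhost', port=6379, db=0, decode_responses=True)
r.set('mykey', 'Hello from Python!')
print(r.get('mykey'))  # 'Hello from Python!' as str, not bytes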

Find more information about Redis database instances and Redis connections in "Redis Connect".

    More developer resources

    Sample Code

Flask Simple Rate Limiting Example: an application that shows how to do rate limiting using various Redis data structures.

    Technical Articles & Videos

    Beyond the Cache with Python


    Redis Launchpad

    Redis Launchpad is like an “App Store” for Redis sample apps. You can easily find apps for your preferred frameworks and languages. Check out a few of these apps below, or click here to access the complete list.

    Rate-Limiting app in Python & Django


    Rate Limiting app built in Python & Django

    Leaderboard app in Python & Django


    How to implement leaderboard app in Python & Django

    Redis University

    Redis for Python Developers

    A complete introduction to Redis for Python developers.

    References

    Redis Launchpad
Getting Started With Redis OM for Python

Next, we'll update the model we defined earlier, adding Field(index=True) to tell Redis OM that we want to index the last_name and age fields:

import datetime
from typing import Optional

from pydantic import EmailStr

from redis_om import (
    Field,
    get_redis_connection,
    HashModel,
    Migrator
)


class Customer(HashModel):
    first_name: str
    last_name: str = Field(index=True)
    email: EmailStr
    join_date: datetime.date
    age: int = Field(index=True)
    bio: Optional[str]


# Now, if we use this model with a Redis deployment that has the
# Redis Stack installed, we can run queries like the following.

# Before running queries, we need to run migrations to set up the
# indexes that Redis OM will use. You can also use the `migrate`
# CLI tool for this!
Migrator().run()


# Find all customers with the last name "Brookins"
Customer.find(Customer.last_name == "Brookins").all()

# Find all customers that do NOT have the last name "Brookins"
Customer.find(Customer.last_name != "Brookins").all()

# Find all customers whose last name is "Brookins" OR whose age is
# 100 AND whose last name is "Smith"
Customer.find((Customer.last_name == "Brookins") | (
    Customer.age == 100
) & (Customer.last_name == "Smith")).all()
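Of course, these queries only return results once instances have been saved. Here is a minimal sketch of creating one, reusing the model and imports above (the field values are illustrative):

customer = Customer(
    first_name="Andrew",
    last_name="Brookins",
    email="andrew.brookins@example.com",
    join_date=datetime.date.today(),
    age=38,
    bio="Python developer",
)
customer.save()     # Persist the model to Redis as a hash
print(customer.pk)  # Redis OM assigns a generated primary key automatically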

    Next Steps

    Now that you know the basics of working with Redis OM, start playing around with it in your project!

    If you're a FastAPI user, check out how to integrate Redis OM with FastAPI.

Ruby and Redis

Follow the commands below to set up a Redis server on macOS:

     brew tap redis-stack/redis-stack
    brew install --cask redis-stack
INFO: Redis Stack unifies and simplifies the developer experience of the leading Redis modules and the capabilities they provide.

    Ensure that you are able to use the following Redis command to connect to the Redis instance.

     redis-cli
    127.0.0.1:6379>

Now you should be able to perform CRUD operations with Redis commands. For example, you can insert data into Redis with the SET command and then fetch it with the GET command, as shown below. Note that redis-cli might require a password if you have set up authentication in your Redis configuration file. By default, Redis listens on port 6379; this can be modified in the Redis configuration file.
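For example (the key and value are illustrative):

127.0.0.1:6379> SET mykey "Hello"
OK
127.0.0.1:6379> GET mykey
"Hello"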

    Step 2. Clone the repository

    git clone https://github.com/redis-developer/redis-ruby-getting-started

    Step 3. Install redis-rb:

    $ gem install redis

    OR

    Step 4. Use Gemfile

$ cat Gemfile
    gem 'redis'

    Step 5. Execute

    $ bundle install
    Resolving dependencies...
    Using bundler 2.2.6
    Using redis 4.2.5
    Following files may not be writable, so sudo is needed:
    /Library/Ruby/Gems/2.6.0
    /Library/Ruby/Gems/2.6.0/build_info
    /Library/Ruby/Gems/2.6.0/cache
    /Library/Ruby/Gems/2.6.0/doc
    /Library/Ruby/Gems/2.6.0/extensions
    /Library/Ruby/Gems/2.6.0/gems
    /Library/Ruby/Gems/2.6.0/specifications
    Bundle complete! 1 Gemfile dependency, 2 gems now installed.
    Use `bundle info [gemname]` to see where a bundled gem is installed.

    Step 6. Verifying

    bundle info redis
    * redis (4.2.5)
    Summary: A Ruby client library for Redis
    Homepage: https://github.com/redis/redis-rb
    Documentation: https://www.rubydoc.info/gems/redis/4.2.5
    Source Code: https://github.com/redis/redis-rb/tree/v4.2.5
    Changelog: https://github.com/redis/redis-rb/blob/master/CHANGELOG.md
    Bug Tracker: https://github.com/redis/redis-rb/issues
    Path: /Library/Ruby/Gems/2.6.0/gems/redis-4.2.5

    Step 7. Opening a Connection to Redis Using redis-rb

    The following code creates a connection to Redis using redis-rb:

    require 'redis'
    redis = Redis.new(host: "localhost", port: 6379, db: 11)
    redis.set("mykey", "hello world")
    redis.get("mykey")

To adapt this example to your code, make sure that you replace the host, port, and db values with those of your database.

You can find connect.rb in the repository you cloned; you can use it directly to test the connection.

    Just execute the below command:

    ruby connect.rb

    Step 8. Verifying

    127.0.0.1:6379> monitor
    OK
    1614684665.728109 [0 [::1]:50918] "select" "11"
    1614684665.728294 [11 [::1]:50918] "set" "mykey" "hello world"
    1614684665.728435 [11 [::1]:50918] "get" "mykey"

    Redis Launchpad

    Redis Launchpad is like an “App Store” for Redis sample apps. You can easily find apps for your preferred frameworks and languages. Check out a few of these apps below, or click here to access the complete list.

    Rate-Limiting app in Ruby on Rails


    Rate Limiting app built in Ruby on Rails

    Leaderboard app in Ruby on Rails


    How to implement leaderboard app in Ruby on Rails

    Further References

Rust and Redis

The web page "Redis Enterprise and Rust" will help you get started with Redis Enterprise and Rust. redis-rs is a Rust implementation of a Redis client library. It exposes a general-purpose interface to Redis and also provides specific helpers for commonly used functionality.

    Step 1. Install Rust

     curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh

    Step 2. Configure your current shell:

     source $HOME/.cargo/env

    Step 3. Verify Rust compiler:

     rustc --version
    rustc 1.49.0

    Step 4. Creating Cargo.toml with Redis dependency:

     [dependencies]
    redis = "0.8.0"

    Step 5. Clone the repository

     git clone https://github.com/redis-developer/redis-rust-getting-started

    Step 6. Run the application

     cargo run

    Further References

Learn 8 NoSQL Data Modeling Patterns in Redis

Consider a product listing page where each item in the list links to a detailed view that shows all the details of that item. There is a 1-to-1 relationship between each item in the list view and the detailed view (shown in Picture 2) of the item. The detailed view shows all the details such as multiple photos, description, manufacturer, dimensions, weight, and so on.

Picture 1: 1-to-1 List View
Picture 2: 1-to-1 Detailed View

    1-to-1 Relationships using SQL

    In a relational database, you may create a table called products where each row holds just enough data to display the information in the list view. Then, you may create another table called product_details where each row holds the rest of the details. You would also need a product_images table, where you store all of the images for a product. You can see the entity relationship diagram in Picture 3.

Picture 3: 1-to-1 Entity Diagram

    Picture 3 depicts the entity relationships between products, product_details, and product_images and represents a normalized data model with a single denormalized field image in the products table. The reason for this is to avoid having to use a SQL JOIN when selecting the products for the list view. Using this model, the SQL query used to get the data needed for the list view might resemble Code Example 1.

    Code Example 1
SELECT
    p.id, p.name, p.image, p.price
FROM
    products p

    1-to-1 Relationships using Redis

In Redis, similar to a relational database, you can create a collection called products and another called product_details. But with Redis JSON you can improve on this by simply embedding product_images and product_details directly into the products collection. Then, when you query the products collection, specify which fields you need based on which view you are trying to create.

    This will allow you to easily keep all the data in one place. This is called the Embedded Pattern and is one of the most common patterns you will see in NoSQL document databases like Redis JSON. Code Example 2 uses Python and a client library called Redis OM (an ORM for Redis) to model Products and ProductDetails. Note that ProductDetails is embedded into Products directly, so all of the data for a product will be stored within the same document.

    Code Example 2
class ProductDetail(EmbeddedJsonModel):
    description: str
    manufacturer: str
    dimensions: str
    weight: str
    images: List[str]

class Product(JsonModel):
    name: str = Field(index=True)
    image: str = Field(index=True)
    price: int = Field(index=True)
    details: Optional[ProductDetail]

    Code Example 2 also shows how you can index fields using Redis OM and Redis Search. Doing this turns Redis into not only a document store but also a search engine since Redis Search enables secondary indexing and searching. When you create models using Redis OM, it will automatically manage secondary indexes with Redis Search on your behalf.

    Using Redis OM we can write a function to retrieve our products list for the list view, as shown in Code Example 3.

    Code Example 3
async def get_product_list():
    results = await connections \
        .get_redis_connection() \
        .execute_command(
            f'FT.SEARCH {Product.Meta.index_name} * LIMIT 0 10 RETURN 3 name image price'
        )
    return Product.from_redis(results)

    Notice that in Code Example 3 we are using the FT.SEARCH command, which specifies the index managed on our behalf by Redis OM and returns three fields: name, image, and price. While the documents all have details and images embedded, we don’t want to display them in the list view so we don’t need to query them. When we want the detailed view, we can query an entire Product document. See Code Example 4 for how to query an entire document.

    Code Example 4
async def get_product_details(product_id: str):
    return await Product.get(product_id)

    When using Redis, you can use RedisInsight as a GUI tool to visualize and interact with the data in your database. Picture 4 shows you what a Products document looks like.

Picture 4: 1-to-1 RedisInsight

    Download the E-book

    I’m sure you’re eager to learn more, so click here to download the full e-book.


    3 design patterns to speed up MEAN and MERN stack applications

    Below you will find an excerpt from the e-book. Click here to download the full e-book.


    Introduction

    If you don't design and build software with attention to performance, your applications can encounter significant bottlenecks when they go into production.

    Over time, the development community has learned common techniques that work as reliable design patterns to solve well-understood problems, including application performance.

    So what are design patterns? They are recommended practices to solve recurring design problems in software systems. A design pattern has four parts: a name, a problem description (a particular set of conditions to which the pattern applies), a solution (the best general strategy for resolving the problem), and a set of consequences.

    Two development stacks that have become popular ways to build Node.js applications are the MEAN stack and the MERN stack. The MEAN stack is made up of the MongoDB database, the Express and Angular.js frameworks, and Node.js. It is a pure JavaScript stack that helps developers create every part of a website or application. In contrast, the MERN stack is made up of MongoDB, the Express and ReactJS frameworks, and Node.js.

    Both stacks work well, which accounts for their popularity. But it doesn't mean the software generated runs as fast as it can—or as fast as it needs to.

In this post, we share one popular design pattern that developers use with Redis to improve application performance with MEAN and MERN stack applications: the master data-lookup pattern. We explain the pattern in detail and accompany it with an overview, typical use cases, and a code example. Our intent is to help you understand when and how to use this particular pattern in your own software development. The e-book covers other patterns too, such as the cache-aside pattern and the write-behind pattern.

    Building a movie application

The demo application used in the rest of this tutorial showcases a movie application with basic create, read, update, and delete (CRUD) operations.

    The movie application dashboard contains a search section at the top and a list of movie cards in the middle. The floating plus icon displays a pop-up when the user selects it, permitting the user to enter new movie details. The search section has a text search bar and a toggle link between text search and basic (that is, form-based) search. Each movie card has edit and delete icons, which are displayed when a mouse hovers over the card.

    This tutorial uses a GitHub sample demo that was built using the following tools:

    • Frontend: ReactJS (18.2.0)
    • Backend: Node.js (16.17.0)
    • Database: MongoDB
• Cache and database: Redis Stack (using Docker)
    GITHUB CODE

Below are the commands to clone the source code (frontend and backend) for the application used in this tutorial:

    git clone https://github.com/redis-developer/ebook-speed-mern-frontend.git

    git clone https://github.com/redis-developer/ebook-speed-mern-backend.git

    The master data-lookup pattern

    One ongoing developer challenge is to (swiftly) create, read, update, and (possibly) delete data that lives long, changes infrequently, and is regularly referenced by other data, directly or indirectly. That's a working definition of master data, especially when it also represents the organization's core data that is considered essential for its operations.

    Master data generally changes infrequently. Country lists, genres, and movie languages usually stay the same. That presents an opportunity to speed things up. You can address access and manipulation operations so that data consistency is preserved and data access happens quickly.

    From a developer's point of view, master data lookup refers to the process by which master data is accessed in business transactions, in application setup, and any other way that software retrieves the information. Examples of master data lookup include fetching data for user interface (UI) elements (such as drop-down dialogs, select values, multi-language labels), fetching constants, user access control, theme, and other product configuration. And you can do that even when you rely primarily on MongoDB as a persistent data store.


    To serve master data from Redis, preload the data from MongoDB.

1. Read the master data from MongoDB on application startup and store a copy of the data in Redis. This pre-caches the data for fast retrieval. Use a script or a cron job to repeatedly copy master data to Redis (a sketch of this step follows the list).
    2. The application requests master data.
    3. Instead of MongoDB serving the data, the master data will be served from Redis.
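The e-book's demo app is built on Node.js; purely as an illustration of step 1, here is a minimal Python sketch. The database, collection, and key names are assumptions for illustration, not taken from the demo:

import json
import redis
from pymongo import MongoClient

r = redis.Redis(host='localhost', port=6379, decode_responses=True)
mongo = MongoClient('mongodb://localhost:27017')

def preload_master_data():
    # Step 1: copy master data from MongoDB into Redis at startup
    docs = mongo.moviesdb.masterCategories.find({
        'statusCode': {'$gt': 0},
        'category': {'$in': ['COUNTRY', 'LANGUAGE']},
    })
    for doc in docs:
        doc['_id'] = str(doc['_id'])  # ObjectId is not JSON-serializable
        r.set(f"masterCategory:{doc['_id']}", json.dumps(doc))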

    Use cases

    Consider this pattern when you need to

    • Serve master data at speed: By definition, nearly every application requires access to master data. Pre-caching master data with Redis delivers it to users at high speed.
    • Support massive master tables: Master tables often have millions of records. Searching through them can cause performance bottlenecks. Use Redis to perform real-time search on the master data to increase performance with sub-millisecond response.
    • Postpone expensive hardware and software investments: Defer costly infrastructure enhancements by using Redis. Get the performance and scaling benefits without asking the CFO to write a check.

    Demo

    The image below illustrates a standard way to showcase a UI that is suitable for master data lookups. The developer responsible for this application would treat certain fields as master data, including movie language, country, genre, and ratings, because they are required for common application transactions.

Consider the pop-up dialog that appears when a user who wants to add a new movie clicks the movie application's plus icon. The pop-up includes drop-down menus for both country and language. In this demonstration, Redis loads the values.


    Code

    The two code blocks below display a fetch query of master data from both MongoDB and Redis that loads the country and language drop-down values.

    Previously, if the application used MongoDB, it searched the static database to retrieve the movie's country and language values. That can be time-consuming if it's read from persistent storage—and is inefficient if the information is static.

*** BEFORE (MongoDB) ***
*** MongoDB regular search query ***
function getMasterCategories() {
    ...
    db.collection("masterCategories").find({
        statusCode: {
            $gt: 0,
        },
        category: {
            $in: ["COUNTRY", "LANGUAGE"],
        },
    });
    ...
}

    Instead, the “after” views in the code blocks show that the master data can be accessed with only a few lines of code—and much faster response times.

*** AFTER (Redis) ***
*** Redis OM Node query ***
function getMasterCategories() {
    ...
    masterCategoriesRepository
        .search()
        .where("statusCode")
        .gt(0)
        .and("categoryTag")
        .containOneOf("COUNTRY", "LANGUAGE");
    ...
}

    Download the E-book

    Sensing a pattern here? The master data-lookup pattern is not the only design pattern you can use to improve application performance.

    I’m sure you’re eager to learn more, so click here to download the full e-book.


    Redis Enterprise Observability with Datadog


Author: Ajeet Raina, Former Developer Growth Manager at Redis
Author: Christian Mague, Former Principal Field Engineer at Redis

    Datadog

DevOps and SRE practitioners are already keenly aware of the importance of system reliability, as it's one of the shared goals in every high-performing organization. Defining clear reliability targets based on solid data is crucial for productive collaboration between developers and SREs. This need spans the entire infrastructure, from the application to backend database services.

    Service Level Objectives (SLOs) provide a powerful interface for all teams to set clear performance and reliability goals based on Service Level Indicators (SLIs) or data points. A good model is to think of the SLIs as the data and the SLO as the information one uses to make critical decisions.

    Further Read: https://cloud.google.com/blog/products/devops-sre/sre-fundamentals-slis-slas-and-slos

    Redis

    Redis is a popular multi-model NoSQL database server that provides in-memory data access speeds for search, messaging, streaming, caching, and graph—amongst other capabilities. Highly performant sites such as Twitter, Snapchat, Freshworks, GitHub, Docker, Pinterest, and Stack Overflow all look to Redis to move data in real time.

    Redis SLOs can be broken down into three main categories:

Category | Definition | Example SLO | Example SLI
Throughput | Number of operations being pushed through the service in a given time period | System should be capable of performing 200M operations per second | redisenterprise.total_req
Latency | Elapsed time it takes for an operation | Average write latency should not exceed 1 millisecond | redisenterprise.avg_latency
Capacity | Memory/storage/network limits of the underlying data source | Database should have 20% memory overhead available to handle bursts | redisenterprise.used_memory_percent

    Why Datadog?

    Running your own performance data platform is time consuming and difficult. Datadog provides an excellent platform with an open source agent to collect metrics and allows them to be displayed easily and alerted upon when necessary.

    Datadog allows you to:

• Collect metrics from various infrastructure components out of the box
• Display that data in easy-to-read dashboards
• Monitor performance metrics and alert accordingly
• Correlate log entries with metrics to quickly drill down to root causes
• Trace requests across distributed services

    Key Performance Indicators

    1. Latency

    Definition

    redisenterprise.avg_latency (unit: microseconds)

    This is the average amount of time that a request takes to return from the time that it first hits the Redis Enterprise proxy until the response is returned. It does not include the full time from the remote client’s perspective.

    Characteristics

    Since Redis is popular due to performance, generally you would expect most operations to return in single digit milliseconds. Tune any alerts to match your SLA. It’s generally recommended that you also measure Redis operation latency at the client side to make it easier to determine if a server slow down or an increase in network latency is the culprit in any performance issues.

Possible Causes

Cause | Factors
Spike in requests | Check both the Network Traffic and Operations Per Second metrics to determine if there is a corresponding increase
Slow-running queries | Check the slow logs in the Redis Enterprise UI for the database
Insufficient compute resources | Check to see if the CPU Usage, Memory Usage Percentage, or Evictions are increasing

Remediation

Action | Method
Increase resources | The database can be scaled up online by going to the Web UI and enabling clustering on the database. In extreme cases, more nodes can be added to the cluster and resources rebalanced
Inefficient queries | Redis allows you to view slow logs with a tunable threshold. It can be viewed either in the Redis Enterprise UI or by running: redis-cli -h HOST -p PORT -a PASSWORD SLOWLOG GET 100

    2. Memory Usage Percentage

    Definition

    redisenterprise.memory_usage_percent (unit: percentage)

    This is the percentage of used memory over the memory limit set for the database.

    Characteristics

    In Redis Enterprise, all databases have a maximum memory limit set to ensure isolation in a multi-tenant environment. This is also highly recommended when running open source Redis. Be aware that Redis does not immediately free memory upon key deletion. Depending on the size of the database, generally between 80-95% is a safe threshold.

Possible Causes

Cause | Factors
Possible spike in activity | Check both the Network Traffic and Operations Per Second metrics to determine if there is a corresponding increase
Database sized incorrectly | View the Memory Usage raw bytes over time to see if a usage pattern has changed
Incorrect retention policies | Check to see if keys are being Evicted or Expired

Remediation

Action | Method
Increase resources | The database memory limit can be raised online with no downtime through either the Redis Enterprise UI or the API
Retention policy | In a caching use case, setting a TTL for unused data to expire is often helpful. In addition, eviction policies can be set; however, these may often not be able to keep up in extremely high throughput environments with very tight resource constraints

    3. Cache Hit Rate

    Definition
    redisenterprise.cache_hit_rate (unit: percent)

    This is the percentage of time that Redis is accessing a key that already exists.

    Characteristics

    This metric is useful only in the caching use case and should be ignored for all other use cases. There are tradeoffs between the freshness of the data in the cache and efficacy of the cache mitigating traffic to any backend data service. These tradeoffs should be considered carefully when determining the threshold for alerting.

    Possible Causes

    This is highly specific to the application caching with no general rules that are applicable in the majority of cases.

    Remediation

    Note that Redis commands return information on whether or not a key or field already exists. For example, the HSET command returns the number of fields in the hash that were added.
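For example (key and field names are illustrative) -- the second call returns 0 because the field already existed, so no new field was added:

127.0.0.1:6379> HSET user:42 name "Ada"
(integer) 1
127.0.0.1:6379> HSET user:42 name "Ada Lovelace"
(integer) 0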

    4. Evictions

    Definition
    redisenterprise.evicted_objects (unit: count)

    This is the count of items that have been evicted from the database.

    Characteristics

    Eviction occurs when the database is close to capacity. In this condition, the eviction policy starts to take effect. While Expiration is fairly common in the caching use case, Eviction from the cache should generally be a matter of concern. At very high throughput and very restricted resource use cases, sometimes the eviction sweeps cannot keep up with memory pressure. Relying on Eviction as a memory management technique should be considered carefully.

    Possible Causes

    See Memory Usage Percentage Possible Causes

    Remediation

    See Memory Usage Percentage Remediation

    Secondary Indicators

    1. Network Traffic

    Definition
    redisenterprise.ingress_bytes/redisenterprise.egress_bytes (unit: bytes)

    Counters for the network traffic coming into the database and out from the database.

    Characteristics

    While these two metrics will not help you pinpoint a root cause, network traffic is an excellent leading indicator of trouble. Changes in network traffic patterns indicate corresponding changes in database behavior and further investigation is usually warranted.

    2. Connection Count

    Definition
    redisenterprise.conns (unit: count)

    The number of current client connections to the database.

    Characteristics

    This metric should be monitored with both a minimum and maximum number of connections. The minimum number of connections not being met is an excellent indicator of either networking or application configuration errors. The maximum number of connections being exceeded may indicate a need to tune the database.

Possible Causes

Cause | Factors
Minimum clients not met | Incorrect client configuration, network firewall, or network issues
Maximum connections exceeded | Client library is not releasing connections or an increase in the number of clients

Remediation

Action | Method
Clients misconfigured | Confirm client configurations
Networking issue | Issue the PING command from a client node, or TELNET to the endpoint
Too many connections | Be sure that you are using pooling on your client library and that your pools are sized accordingly
Too many connections | Using rladmin, run: tune proxy PROXY_NUMBER threads VALUE

    You can access the complete list of metrics here.

    Getting Started

    Follow the steps below to set up the Datadog agent to monitor your Redis Enterprise cluster, as well as database metrics:

    Quickstart Guide:

    Prerequisites:

    • Follow this link to setup your Redis Enterprise cluster and database
• Set up a read-only user account by logging into your Redis Enterprise instance and visiting the "Access Control" section


    • Add a new user account with Cluster View Permissions.


    Step 1. Set Up a Datadog Agent

    Before we jump into the installation, let’s look at the various modes that you can run the Datadog agent in:

    • External Monitor Mode
    • Localhost Mode

    External Monitor Mode


    In external monitor mode, a Datadog agent running outside of the cluster can monitor multiple Redis Enterprise clusters, as shown in the diagram above.

    Localhost Mode

    Using localhost mode, the integration can be installed on every node of a Redis Enterprise cluster. This allows the user to correlate OS level metrics with Redis-specific metrics for faster root cause analysis. Only the Redis Enterprise cluster leader will submit metrics and events to Datadog. In the event of a migration of the cluster leader, the new cluster leader will begin to submit data to Datadog.


    For this demo, we will be leveraging localhost mode as we just have two nodes to configure.

    Step 2. Launch the Datadog agent on the Master node

Pick your preferred OS distribution and install the Datadog agent.

    Run the following command to install the integration wheel with the Agent. Replace the integration version with 1.0.1.

     datadog-agent integration install -t datadog-redisenterprise==<INTEGRATION_VERSION>

Step 3. Configure the Datadog configuration file

    Copy the sample configuration and update the required sections to collect data from your Redis Enterprise cluster:

    For Localhost Mode

    The following minimal configuration should be added to the Enterprise Master node.

     sudo vim /etc/datadog-agent/conf.d/redisenterprise.d/conf.yaml
#################################################################
# Base configuration
init_config:

instances:
  - host: localhost
    username: user@example.com
    password: secretPassword
    port: 9443

Similarly, you need to edit the configuration file on the Enterprise follower node to add the following:

     sudo vim /etc/datadog-agent/conf.d/redisenterprise.d/conf.yaml
#################################################################
# Base configuration
init_config:

instances:
  - host: localhost
    username: user@example.com
    password: secretPassword
    port: 9443

    For External Monitor Mode

    The following configuration should be added to the Monitor node

#  Base configuration
init_config:

instances:
  - host: cluster1.fqdn
    username: user@example.com
    password: secretPassword
    port: 9443

  - host: cluster2.fqdn
    username: user@example.com
    password: secretPassword
    port: 9443

    Step 4. Restart the Datadog Agent service

     sudo service datadog-agent restart

    Step 5. Viewing the Datadog UI

    Find the Redis Enterprise Integration under the Integration Menu:


    Displaying the host reporting data to Datadog:


    Listing the Redis Enterprise dashboards:


    Host details under Datadog Infrastructure list:


    Datadog dashboard displaying host metrics of the 1st host (CPU, Memory Usage, Load Average etc):


    Datadog dashboard displaying host metrics of the 2nd host:


    Step 6. Verifying the Datadog Agent Status

    Running the datadog-agent command shows that the Redis Enterprise integration is working correctly.

     sudo datadog-agent status
     redisenterprise (1.0.1)
    -----------------------
    Instance ID: redisenterprise:ef4cd60aadac5744 [OK]
    Configuration Source: file:/etc/datadog-agent/conf.d/redisenterprise.d/conf.yaml
    Total Runs: 2
    Metric Samples: Last Run: 0, Total: 0
    Events: Last Run: 0, Total: 0
    Service Checks: Last Run: 0, Total: 0
    Average Execution Time : 46ms
    Last Execution Date : 2021-10-28 17:27:10 UTC (1635442030000)
    Last Successful Execution Date : 2021-10-28 17:27:10 UTC (1635442030000)

    Redis Enterprise Cluster Top View


Let’s run a benchmark tool called memtier_benchmark to simulate an arbitrary number of clients connecting at the same time and performing actions on the server, measuring how long it takes for the requests to be completed.

     memtier_benchmark --server localhost -p 19701 -a password
    [RUN #1] Preparing benchmark client...
    [RUN #1] Launching threads now...


    This command instructs memtier_benchmark to connect to your Redis Enterprise database and generates a load doing the following:

    • Write objects only, no reads.
    • Each object is 500 bytes.
    • Each object has random data in the value.
    • Each key has a random pattern, then a colon, followed by a random pattern.

    Run this command until it fills up your database to where you want it for testing. The easiest way to check is on the database metrics page.

     memtier_benchmark --server localhost -p 19701 -a Oracle9ias12# -R -n allkeys -d 500 --key-pattern=P:P --ratio=1:0
    setting requests to 50001
    [RUN #1] Preparing benchmark client...
    [RUN #1] Launching threads now...


    The Datadog Events Stream shows an instant view of your infrastructure and services events to help you troubleshoot issues happening now or in the past. The event stream displays the most recent events generated by your infrastructure and the associated monitors, as shown in the diagram below.


    References:


    How to Import data into a Redis database


Author: Ajeet Raina, Former Developer Growth Manager at Redis

Redis offers multiple ways to import data into a database: from a file, from a script, or from an existing Redis database.

    Import using redis-cli script

    1. Create a simple file users.redis with all the commands you want to run

      HSET 'user:001' first_name 'John' last_name 'doe' dob '12-JUN-1970'
      HSET 'user:002' first_name 'David' last_name 'Bloom' dob '03-MAR-1981'
    2. Use the redis-cli tool to execute the script

      redis-cli -h localhost -p 6379 < users.redis

    This approach will only run the commands and will not impact the existing data, except if you modify existing keys in the script.

    Sample dataset: You can find sample dataset ready to be imported using this method in the https://github.com/redis-developer/redis-datasets repository.


    Restore an RDB file

If you have an RDB file dump.rdb that contains the data you want, you can use this file to create a new database:

    1. Copy the dump.rdb file into the Redis working directory

If you do not know which folder this is, run the command CONFIG GET dir against your running Redis instance (see the example after this list)

    2. Start the Redis service with the redis-server

    3. The file dump.rdb is automatically imported.

    4. Connect to the database using redis-cli or any other client, to check that data have been imported. (for example SCAN)

Warning: Importing data erases all existing content in the database.
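For reference, checking the working directory from redis-cli looks like this (the path will vary by installation):

127.0.0.1:6379> CONFIG GET dir
1) "dir"
2) "/var/lib/redis"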


    Import & Synchronize using RIOT

    Redis Input/Output Tools (RIOT) is a set of import/export command line utilities for Redis:

    • RIOT DB: migrate from an RDBMS to Redis, Search, JSON, ...
    • RIOT File: bulk import/export data from/to files.
    • RIOT Gen: generate sample Redis datasets for new feature development and proof of concept.
    • RIOT Redis: live replication from any Redis database (including AWS Elasticache) to another Redis database.
    • RIOT Stream: import/export messages from/to Kafka topics.

    Import data into Redis Enterprise

You can easily import data into Redis Enterprise and Redis Enterprise Cloud; take a look at the following documentation:


    Explore Your Data

The following links provide you with the available options to explore a new Redis database, either in the cloud or using local software.

    Redis in-memory database. How it works and why you should use it.
    An intuitive and efficient GUI for Redis
    An out-of-the-box predefined build Grafana dashboard for Redis
    How to create Grafana Dashboards for Redis Enterprise cluster in 5 Minutes
    A set of import/export command line utilities for Redis
How to add Redis as a datasource in Grafana and build customized dashboards for Analytics

In our case, we will be using the redis-datasource plugin.

     docker run -d -p 3000:3000 --name=grafana -e "GF_INSTALL_PLUGINS=redis-datasource" grafana/grafana

    Step 3. Accessing the grafana dashboard

Open http://IP:3000 to access Grafana. The default username/password is admin/admin.


    Step 4. Click "Configuration"


    Step 5. Add Redis as a Data Source


    Step 6. Select "Redis" as data source type


    Step 7. Add Redis Database name, Endpoint URL and password

This assumes that you already have a Redis server and database up and running in your infrastructure. You can also leverage Redis Enterprise Cloud, as showcased in the example below.


    Step 8. Click "Import" under Dashboard


Step 9. Access the Redis datasource Dashboard


    Supported commands

    Data Source supports various Redis commands using custom components and provides a unified interface to query any command.

    Query

    Further References


    How to create Grafana Dashboards for Redis Enterprise cluster in 5 Minutes


Author: Ajeet Raina, Former Developer Growth Manager at Redis

Redis Enterprise clusters are a set of nodes, typically two or more, providing database services. Clusters are inherently multi-tenant, and a single cluster can manage multiple databases accessed through individual endpoints. Redis Enterprise software provides a REST API to retrieve information about the cluster, databases, nodes, and metrics.

Redis Explorer is the latest plugin in Grafana Labs that adds support for Redis Enterprise software. It is a plugin for Grafana that connects to Redis Enterprise software clusters using the REST API. It provides application pages to add Redis data sources for managed databases, and dashboards to see cluster configuration.


Redis Explorer allows you to create the dashboards below in Grafana:

    Enterprise Clusters Dashboard

    Enterprise Clusters dashboard provides basic information about the cluster, license, and displays most important metrics.


    Cluster Overview Dashboard

    Cluster Overview dashboard provides the most important information and metrics for the selected cluster.


    Cluster Nodes Dashboard

    Cluster Nodes dashboard provides information and metrics for each node participating in the cluster.


    Cluster Databases Dashboard

Cluster Databases dashboard provides information and metrics for each database managed by the cluster.


    Getting Started

    Pre-requisite

    • Grafana 8.0+ is required for Redis Explorer 2.X.
    • Grafana 7.1+ is required for Redis Explorer 1.X.
    • Docker
    • Redis Enterprise Cluster

    Step 1. Setup Redis Enterprise Cluster

Follow these steps to set up Redis Enterprise cluster nodes:


    Step 2. Install Grafana

     brew install grafana

    Step 3. Install redis-explorer-app

Use the grafana-cli tool to install the plugin from the command line; the Redis Application plugin and Redis Data Source will be auto-installed as dependencies:

     grafana-cli plugins install redis-explorer-app

    Step 4. Using Docker

You can even run the Redis Explorer plugin using Docker:

     docker run -p 3000:3000 --name=grafana -e "GF_INSTALL_PLUGINS=redis-explorer-app" grafana/grafana

Open http://IP:3000 to access Grafana. The default username/password is admin/admin.

    Step 5. Log in to Grafana


    Step 6. Choose Redis Explorer in the sidebar

    Once you add the datasource, you should be able to choose the right option


    Step 7. Getting the Redis Enterprise Cluster Overview


    Step 8. Displaying the Redis Enterprise Cluster Nodes


    Further References

    Redis Launchpad

    Utilize Elasticache Auto Discovery For Redis with RedisInsight


Author: Ajeet Raina, Former Developer Growth Manager at Redis

RedisInsight is a 100% free Redis GUI that allows you to visualise, monitor, and optimize while developing your applications with Redis. It provides an intuitive and efficient GUI for Redis, allowing developers like you to interact with your databases and manage your data. RedisInsight can also connect to your database through a Sentinel instance. Please note that RedisInsight v2.0 is an open source visual tool built by Redis that lets you do both GUI- and CLI-based interactions with your Redis database.

    RedisInsight lets you automatically add Redis Enterprise Software and Redis Enterprise Cloud databases. RedisInsight also allows you to automatically discover Elasticache Redis caches.

NOTE: ElastiCache Redis caches cannot be accessed from outside the VPC, as they don’t have public IP addresses assigned to them. If you want to work with ElastiCache Redis caches in RedisInsight, you can set up an SSH tunnel between RedisInsight and your ElastiCache instance, as long as you're not using Redis Cluster.
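A typical tunnel command (all hostnames are placeholders) forwards a local port through an EC2 instance running in the same VPC; RedisInsight can then connect to localhost:6379:

 ssh -N -L 6379:your-cache.abc123.use1.cache.amazonaws.com:6379 ubuntu@your-ec2-public-dns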

    This tutorial shows how to:

    • Setup and configure Amazon Elasticache
    • Configure the VPC
• Configure the security groups
    • Configure and setup Amazon EC2
    • Create and configure IAM role
    • Assign the permissions
    • Connect to Elasticache from EC2 instance
    • Setup RedisInsight
    • Access RedisInsight
    • Autodiscover Elasticache Instance

    Step 1. Setup and configure Amazon Elasticache

Log in to the AWS Management Console and click "Get Started now".


    Choose "Redis" as the cluster engine


    Configure Redis settings:


    Copy and save the Elasticache primary endpoint URL:


    Step 2. Configure the VPC

Choose and configure the VPC that has your ElastiCache instances.


    Step 3. Configure the Security Groups


    Configure inbound and outbound rules to allow RedisInsight and Redis ports:


    Step 4. Configure and setup Amazon EC2


    Step 5. Create and configure IAM role

    You can use the AWS Management Console to create a role that an IAM user can assume


    Under Select type of trusted entity, choose EC2. In other words, the role is used by an EC2 instance


    Click “Next”.

    Step 6. Assign the permissions

    Assign the below permissions:

    • AmazonS3ReadOnlyAccess
    • AmazonElastiCacheReadOnlyAccess


    Step 7. Connect to Elasticache from EC2 instance

    Use the redis-cli command to connect to the remote Amazon Elasticache for Redis server endpoint URL.

     ubuntu@ip-10-0-0-254:~$ redis-cli -h redisinsightdemo.8cfnjo.ng.0001.use1.cache.amazonaws.com -p 6379
    redisinsightdemo.8cfnjo.ng.0001.use1.cache.amazonaws.com:6379>

    Step 8. Setup RedisInsight

    In order to access the RedisInsight GUI, run the following Docker command:

     ubuntu@ip-10-0-0-254:~$ sudo docker run -v redisinsight:/db -p 8001:8001 redislabs/redisinsight:latest
    Unable to find image 'redislabs/redisinsight:latest' locally
    latest: Pulling from redislabs/redisinsight
     sudo docker ps
    CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
    caf3d674fb81 redislabs/redisinsight:latest "bash ./docker-entry…" 4 seconds ago Up 3 seconds 0.0.0.0:8001->8001/tcp, :::8001->8001/tcp cool_pascal

    Step 9. Access RedisInsight

To access the RedisInsight GUI, open your preferred browser and go to http://localhost:8001.


    Step 10. Autodiscover Elasticache Instance

NOTE: In case you encounter the following error message:

    This EC2 instance does not have permissions to discover your ElastiCache instances. To grant permission, create an IAM role with the DescribeCacheClusters permission and attach the role to this EC2 instance.

You might have to attach the IAM role to the instance, as shown below:

Now you should be able to autodiscover your ElastiCache instances:

    Add the selected instance:


    Add the discovered instance:


    References

    Redis Launchpad

    Visualize Redis database keys using RedisInsight Browser Tool


Author: Ajeet Raina, Former Developer Growth Manager at Redis

    RedisInsight is a 100% free Redis GUI that allows you to visualise, monitor, and optimize while developing your applications with Redis. It provides an intuitive and efficient GUI for Redis allowing developers like you to interact with your databases and manage your data.

RedisInsight Browser lets you explore the keys in your Redis server. You can add, edit, and delete a key. You can even update a key's expiry and copy the key name for use elsewhere in your application.

To understand the capabilities of the Browser tool, let us take a simple example and demonstrate each of its options:

    Step 1: Create a Redis Database

Follow this link to create a Redis database.

    Step 2: Download RedisInsight

    To install RedisInsight on your local system, you need to first download the software from the Redis website.

    Click this link to access a form that allows you to select the operating system of your choice.

    My Image

    Run the installer. After the web server starts, open http://YOUR_HOST_IP:8001 and add a Redis database connection.

    Select "Connect to a Redis database" My Image

    Enter the requested details, including Name, Host (endpoint), Port, and Password. Then click “ADD REDIS DATABASE”.

    Step 3: Open "Browser Tool"

    alt_text

    Step 4: Importing keys

Let us import a user database (6k keys). This dataset contains users stored as Redis Hashes.

    Users

The user hashes contain the following fields (a sample command follows the list):

    • user:id : The key of the hash.
    • first_name : First Name.
    • last_name : Last name.
    • email : email address.
    • gender : Gender (male/female).
    • ip_address : IP address.
    • country : Country Name.
    • country_code : Country Code.
    • city : City of the user.
    • longitude : Longitude of the user.
    • latitude : Latitude of the user.
    • last_login : EPOC time of the last login.
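For illustration, a single hash shaped like this dataset could be created as follows (the key and values here are made up, not part of the imported data):

 HSET user:6001 first_name "Mary" last_name "Smith" email "mary.smith@example.com" gender "female" ip_address "10.0.0.5" country "United States" country_code "US" city "Austin" longitude "-97.7431" latitude "30.2672" last_login "1621343220"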

    Step 5: Cloning the repository

     git clone https://github.com/redis-developer/redis-datasets
    cd redis-datasets/user-database

    Importing the user database:

     redis-cli -h localhost -p 6379 < ./import_users.redis

Refresh the keys view by clicking the refresh icon as shown below.

Click "Scan More" to scan all 6k keys:

    alt_text

    You can get a real-time view of the data in your Redis database as shown below:

    alt_text

Select any key in the keyspace and its details, including fields and values, are displayed on the right-hand side.

    alt_text

    Step 6. Adding a new key

    alt_text

Enter the key name, field, and value.

    alt_text

    Step 7. Searching the hash key

You can search for the key "user:9999" and you will see the result.

    alt_text

Let us add fields for user:9999 as shown below.

You can even search with a wildcard by typing the first few letters followed by "*":

    alt_text

    Step 8: Filter keys by Data Type

    alt_text

    Step 9: Setting up the Expiry value

Let us set the expiry to 2 seconds; once the key expires, searching for it returns no results.

    alt_text
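The CLI equivalent of this GUI action (assuming the user:9999 key from the previous step) would be:

 EXPIRE user:9999 2
 TTL user:9999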

    Step 10: Using CLI

RedisInsight CLI lets you run commands against a Redis server. You don't need to remember the syntax - the integrated help shows you all the arguments and validates your command as you type.

    > HMGET user:3333 first_name last_name city

    1) "Myrlene"
    2) "McGrane"
    3) "Qinghu"

    alt_text

    Further References

    Redis Launchpad
Manage Your Redis Cluster using RedisInsight Cluster Management Tool

    Step 18. Removing the node from the Cluster

    cluster

    Step 19. Failover

To upgrade the Redis process on one of the master nodes, it is a good idea to fail it over first, turning it into a replica, with minimal impact on availability.

    cluster

The RedisInsight Cluster Management tool also allows you to rebalance your cluster by manually defining the slot coverage, as shown below:

    cluster cluster

Getting Started with RedisInsight

    Enter the requested details, including Name, Host (endpoint), Port, and Password in the form, as shown below. You can skip username for now. Then click “ADD REDIS DATABASE”:

    My Image

    Step 5. Run the Redis CLI

    Finally, although RedisInsight is a great GUI, sometimes you want to work directly in the command-line interface (CLI). To do so, click “CLI” in the menu on the left side of the RedisInsight UI:

    My Image

    Then paste the appropriate Redis commands in the command section, marked with “>>” as shown below, and press Enter.

    My Image

    You can see the output displayed at the top of the screen. If it says “OK,” the command was executed successfully.

    RedisInsight Overview (RedisConf'21)

    Further References

    Redis Launchpad

    RedisInsight Developer Hub for Redis Interactive Tutorials

    Getting started with RedisInsight
    Visualize Redis database keys using the RedisInsight Browser Tool
    Debug Redis using the RedisInsight Slowlog Debugging Tool
    Optimize & Analyze Redis using the RedisInsight Memory Analyzer Tool
    Manage Your Redis Cluster using the RedisInsight Cluster Management Tool
    Use Redis Streams Consumer Groups with RedisInsight
    Analyze Your Redis Commands Using The Monitor Command
    Query, Visualize and Manipulate Graphs using the Graph Browser Visualization Tool
    Write Your Serverless Redis function using the Triggers and Functions Browser Tool
    Manage Redis time-series data using the Time Series Browser Tool
    Perform Database Search and Analytics using the Search Browser Tool
    Utilize Elasticache Auto Discovery For Redis with RedisInsight
Optimize & Analyze Redis using RedisInsight Memory Analyzer Tool

The trade-off of converting large keys to smaller keys is that the large keys were more descriptive than the shortened ones, so when reading through your database you may find the keys less relatable. Key patterns that need to be modified: key:*

    alt_text

    Memory Analyzer

Memory Analyzer lets you search for a key or key pattern and get related information and stats about it. You can apply various filters and aggregations using the advanced filters feature.

When the analyze-memory button is clicked, RedisInsight connects to the Redis instance and takes a point-in-time snapshot of the database. Here's a link that dives deep into the SYNC and DUMP approaches.

With 1 million keys created using the SET command, open the Memory Analyzer section and click "Advanced Filters".
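If you need to generate a comparable number of test keys on a disposable development instance, one quick way (not part of the original walkthrough) is the DEBUG POPULATE command:

 DEBUG POPULATE 1000000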

    alt_text

The advanced filters allow you to choose data type, encoding, and memory, and to group by data type or encoding, aggregate, etc.

    alt_text

    You can check memory usage by key:

    alt_text


    RedisInsight Profiler Tool - Analyze Your Redis Commands Using Redis Monitor Command


    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

RedisInsight profiler analyzes the Redis commands that are being run on the Redis server in real time. The tool provides detailed information about the number of commands processed, commands per second, and the number of connected clients. It also gives information about top prefixes, top keys, and top commands.

It basically runs the Redis MONITOR command and generates a summarized view. MONITOR is a debugging command that streams back every command processed by the Redis server. It can help in understanding what is happening to the database. This command can be used via both redis-cli and telnet. All the commands sent to the Redis instance are monitored for the duration of the profiling. The ability to see all the requests processed by the server is useful for spotting bugs in an application, both when using Redis as a database and as a distributed caching system.

    caution

Because MONITOR streams back all commands, its use comes at a cost. Running the MONITOR command can hurt the performance of your production server, so the profiler runs for a maximum of 5 minutes if the user has not stopped it earlier. This avoids overloading the Redis server.

Follow the instructions below to test drive the RedisInsight profiler tool:

    Step 1. Create Redis database with Redis Time Series module enabled

Visit https://developer.redis.com/create/rediscloud and create a Redis database. Follow these steps to enable the Redis Time Series module on Redis Enterprise Cloud.

    alt_text

    You can use Redis CLI to connect to the remote Redis Enterprise cloud database. You can check memory usage with the Redis INFO command.

    tip

    RedisInsight allows you to add a Redis Sentinel database too. Refer to the documentation to learn more.

    Step 2: Download RedisInsight

    tip

RedisInsight v2.0 is an open source visual tool that lets you do both GUI- and CLI-based interactions with your Redis database. It is a desktop manager that provides an intuitive and efficient GUI for Redis, allowing you to interact with your databases, monitor, and manage your data.

    Refer to these tutorials to learn more about this latest release.

    To install RedisInsight on your local system, you need to first download the software from the Redis website.

    Click this link to access a form that allows you to select the operating system of your choice.

    My Image

    Run the installer. After the web server starts, open http://YOUR_HOST_IP:8001 and add a Redis database connection.

    Select "Connect to a Redis database" My Image

    Enter the requested details, including Name, Host (endpoint), Port, and Password. Then click “ADD REDIS DATABASE”.

Step 3. Cloning the GitHub repo

    We will be using a python script to fetch sensor data from one of the IoT Edge sensor devices (such as BME680 sensors) and then push the sensor values to the Redis Cloud database.

$ git clone https://github.com/redis-developer/redis-datasets
$ cd redis-datasets/redistimeseries/realtime-sensor-jetson
    import bme680
    import time
    import datetime
    import csv
    import argparse
    import redis


    print("""read-sensor.py - Displays temperature, pressure, humidity, and gas.
    Press Ctrl+C to exit!
    """)

try:
    sensor = bme680.BME680(bme680.I2C_ADDR_PRIMARY)
except IOError:
    sensor = bme680.BME680(bme680.I2C_ADDR_SECONDARY)

# These calibration data can safely be commented
# out, if desired.

print('Calibration data:')
for name in dir(sensor.calibration_data):
    if not name.startswith('_'):
        value = getattr(sensor.calibration_data, name)
        if isinstance(value, int):
            print('{}: {}'.format(name, value))

# These oversampling settings can be tweaked to
# change the balance between accuracy and noise in
# the data.

sensor.set_humidity_oversample(bme680.OS_2X)
sensor.set_pressure_oversample(bme680.OS_4X)
sensor.set_temperature_oversample(bme680.OS_8X)
sensor.set_filter(bme680.FILTER_SIZE_3)
sensor.set_gas_status(bme680.ENABLE_GAS_MEAS)

print('\n\nInitial reading:')
for name in dir(sensor.data):
    value = getattr(sensor.data, name)
    if not name.startswith('_'):
        print('{}: {}'.format(name, value))

sensor.set_gas_heater_temperature(320)
sensor.set_gas_heater_duration(150)
sensor.select_gas_heater_profile(0)

# Up to 10 heater profiles can be configured, each
# with their own temperature and duration.
# sensor.set_gas_heater_profile(200, 150, nb_profile=1)
# sensor.select_gas_heater_profile(1)

parser = argparse.ArgumentParser()
parser.add_argument("--port", type=int,
                    help="redis instance port", default=6379)
# the password must be parsed as a string, not an int
parser.add_argument(
    "--password", type=str, help="redis instance password", default=None
)
parser.add_argument(
    "--verbose", help="enable verbose output", action="store_true")
parser.add_argument("--host", type=str,
                    help="redis instance host", default="127.0.0.1")

args = parser.parse_args()

# redis setup
redis_obj = redis.Redis(host=args.host, port=args.port, password=args.password)
temperature_key = "ts:temperature"
pressure_key = "ts:pressure"
humidity_key = "ts:humidity"

print('\n\nPolling:')
try:
    while True:
        if not sensor.get_sensor_data():
            print('Can not access sensor data')
            continue

        output = '{0:.2f} C,{1:.2f} hPa,{2:.2f} %RH'.format(
            sensor.data.temperature,
            sensor.data.pressure,
            sensor.data.humidity)

        if not sensor.data.heat_stable:
            print('Heat unstable: ' + output)
            continue

        print('{0},{1} Ohms'.format(
            output,
            sensor.data.gas_resistance))

        date = datetime.datetime.now()
        timestamp = int(date.timestamp() * 1000)

        # Create a pipeline so all three samples are sent in one round trip
        pipe = redis_obj.pipeline()

        pipe.execute_command(
            "ts.add", temperature_key, timestamp, sensor.data.temperature
        )

        pipe.execute_command(
            "ts.add", pressure_key, timestamp, sensor.data.pressure
        )

        pipe.execute_command("ts.add", humidity_key,
                             timestamp, sensor.data.humidity)

        # Execute the pipeline
        pipe.execute()

        time.sleep(1)

except KeyboardInterrupt:
    pass

The complete walkthrough of this Python script is explained here.

    Step 4: Execute the sensor script

    Let us execute the script using the command line:

    $ sudo python3 sensorloader2.py --host Endpoint_of_Redis_enterprise_Cloud --port port

Run the MONITOR command to verify whether sensor values are being fetched. (Don't run this command in a production environment.)

    redis-17316.c251.east-us-mz.azure.cloud.redislabs.com:17316> monitor
    OK
    1622212328.833139 [0 122.171.186.213:59471] "monitor"
    1622212329.865158 [0 70.167.220.160:50378] "MULTI"
    1622212329.865158 [0 70.167.220.160:50378] "ts.add" "ts:temperature" "1622212329847" "35.67"
    1622212329.865158 [0 70.167.220.160:50378] "ts.add" "ts:pressure" "1622212329847" "957.52"
    1622212329.865158 [0 70.167.220.160:50378] "ts.add" "ts:humidity" "1622212329847" "11.111"
    1622212329.865158 [0 70.167.220.160:50378] "EXEC"
    1622212330.941178 [0 70.167.220.160:50378] "MULTI"
    1622212330.941178 [0 70.167.220.160:50378] "ts.add" "ts:temperature" "1622212330920" "35.68"
    1622212330.941178 [0 70.167.220.160:50378] "ts.add" "ts:pressure" "1622212330920" "957.51"
    1622212330.941178 [0 70.167.220.160:50378] "ts.add" "ts:humidity" "1622212330920" "11.111"
    1622212330.941178 [0 70.167.220.160:50378] "EXEC"

    Step 5: Accessing the Redis Time Series Keys

    Follow these steps to connect to the database using RedisInsight. Once you are connected to RedisInsight GUI, you can verify the 3 Redis Time Series keys:

    • ts:temperature
    • ts:pressure
    • ts:humidity

    alt_text

Step 6: Running Redis Time Series specific queries

    alt_text

Please note that in Redis Time Series, only TS.RANGE and TS.MRANGE are supported as of the current release. In the next release, TS.REVRANGE and TS.MREVRANGE will be supported too.

    alt_text

    Step 7. Initiate the Profiler

    Click “Start Profiler” while sensor data is continuously being pushed to Redis database.

    alt_text

Let the profiler tool run for the next 1-2 minutes.

    alt_text

    Stop the profiler to see the results as shown below:

    alt_text

The profiler then provides the following statistics:

    • How many commands were processed
    • Number of connected clients
    • Rate at which the commands were executed
    • Top key patterns (key patterns followed by number of commands)
    • Top Keys
    • Top Commands & their frequency

    Redis Launchpad

    Perform Database Search and Analytics using Redis Search Browser Tool


    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

A full-featured pure desktop GUI client, RedisInsight supports Redis Search. Redis Search is a powerful indexing, querying, and full-text search engine for Redis, and one of the most mature and feature-rich Redis modules. With RedisInsight, the following functionality is available:

    • Multi-line for building queries
    • Added ability to submit query with ‘ctrl + enter’ in single line mode
    • Better handling of long index names in index selector dropdown
    • Fixed bug with pagination on queries with whitespace in the query string
    • Support Aggregation
    • Support Fuzzy logic
    • Simple and complex conditions
    • Sorting
    • Pagination
    • Counting

    Redis Search allows you to quickly create indexes on datasets (Hashes), and uses an incremental indexing approach for rapid index creation and deletion. The indexes let you query your data at lightning speed, perform complex aggregations, and filter by properties, numeric ranges, and geographical distance.

    Step 1. Create Redis database

Follow this link to create a Redis Stack database, which comes with Redis Search.

    Step 2: Download RedisInsight

    To install RedisInsight on your local system, you need to first download the software from the Redis website.

    Click this link to access a form that allows you to select the operating system of your choice.

    My Image

    Run the installer. After the web server starts, open http://YOUR_HOST_IP:8001 and add a Redis database connection.

    Select "Connect to a Redis database" My Image

    Enter the requested details, including Name, Host (endpoint), Port, and Password. Then click “ADD REDIS DATABASE”.

    alt_text

We will look at two datasets: OpenBeerDB and a movie dataset. Let us begin with the OpenBeerDB sample dataset.

    Step 3. OpenBeerDB sample dataset

To demonstrate Redis Search, we will use the OpenBeerDB dataset. The dataset is publicly available at openbeerdb.com.

    alt_text

    Let us clone the repository to access the dataset:

    $ git clone https://github.com/redis-developer/redis-datasets
    cd redis-datasets/redisearch/openbeerdb

    Step 4. Installing prerequisite packages

    $ brew install python3
    $ pip3 install -r requirements.txt

    Step 5. Importing the data

    $ python3 import.py --url redis://localhost:6379
    Importing categories…
    Importing styles...
    Importing breweries...
    Adding beer data to Redis Search..

    Step 6: Choose “Redis Search” under RedisInsight browser tool

    alt_text

    Run the below query:

    "@abv:[5 6]"

    alt_text

You can click on "{:}" to get a JSON view as shown below:

    alt_text

    alt_text

    You can download the data in CSV format.

    alt_text

    alt_text

    Query: All beers with ABV higher than 5% but lower than 6%

The beers are added to the Redis Search index weighted by ABV, so by default the results are ordered from highest ABV to lowest. Both ABV and IBU are sortable, so you can order results by either of these fields using sortby in the query, as in the example below.
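For instance, assuming abv is defined as a sortable numeric field in this dataset's index, a query along these lines returns the strongest matching beers first:

 "@abv:[5 6]" sortby abv desc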

    alt_text

    Query: All beers with ABV higher than 5% but lower than 6% within the specified limits

    "@abv:[5 6]" limit 0 100

    alt_text

    Query: Find out Irish Ale and German Ale beers with ABV greater than 9%:

    alt_text

    Step 7. Using AGGREGATION

    Aggregations are a way to process the results of a search query, group, sort and transform them - and extract analytic insights from them. Much like aggregation queries in other databases and search engines, they can be used to create analytics reports, or perform Faceted Search style queries.

    For example, indexing a web-server's logs, we can create a report for unique users by hour, country or any other breakdown; or create different reports for errors, warnings, etc.

Let's run the following aggregation query:

    FT.AGGREGATE "beerIdx" "@abv:[5 6]" limit 0 1060 GROUPBY 1 @breweryid

    alt_text

Let us now look at the movie sample dataset.

    Step 8. Create Redis database

Follow this link to create a Redis Stack database, using a Docker container, that comes with the Redis Search module enabled.

    Step 9. Install RedisInsight

Follow this link to set up RedisInsight locally on your system.

    alt_text

    Step 10. Movie Sample Database

In this project you will use a simple dataset describing movies; for now, all records are in English. You will learn more about other languages in another tutorial.

    A movie is represented by the following attributes:

    • movie_id : The unique ID of the movie, internal to this database
    • title : The title of the movie.
    • plot : A summary of the movie.
    • genre : The genre of the movie, for now a movie will only have a single genre.
    • release_year : The year the movie was released as a numerical value.
    • rating : A numeric value representing the public's rating for this movie.
    • votes : Number of votes.
    • poster : Link to the movie poster.
    • imdb_id : id of the movie in the IMDB database.

    Key and Data structure

As a Redis developer, one of the first things to look at when building your application is the structure of the keys and data (data design/data modeling).

A common way of defining keys in Redis is to use specific patterns. For example, in this application, where the database will probably deal with various business objects (movies, actors, theaters, users, ...), we can use the following pattern:

    • business_object:key

    For example:

    • movie:001 for the movie with the id 001
    • user:001 the user with the id 001

And for the movie information you should use a Redis Hash.

A Redis Hash allows the application to structure all the movie attributes in individual fields; Redis Search will then index the fields based on the index definition.

    Step 11. Insert Movies

It is time now to add some data to your database. Let's insert a few movies, using redis-cli or RedisInsight.

    Once you are connected to your Redis instance run the following commands:

    > HSET movie:11002 title "Star Wars: Episode V - The Empire Strikes Back" plot "After the Rebels are brutally overpowered by the Empire on the ice planet Hoth, Luke Skywalker begins Jedi training with Yoda, while his friends are pursued by Darth Vader and a bounty hunter named Boba Fett all over the galaxy." release_year 1980 genre "Action" rating 8.7 votes 1127635 imdb_id tt0080684


    > HSET movie:11003 title "The Godfather" plot "The aging patriarch of an organized crime dynasty transfers control of his clandestine empire to his reluctant son." release_year 1972 genre "Drama" rating 9.2 votes 1563839 imdb_id tt0068646


    > HSET movie:11004 title "Heat" plot "A group of professional bank robbers start to feel the heat from police when they unknowingly leave a clue at their latest heist." release_year 1995 genre "Thriller" rating 8.2 votes 559490 imdb_id tt0113277


    > HSET "movie:11005" title "Star Wars: Episode VI - Return of the Jedi" genre "Action" votes 906260 rating 8.3 release_year 1983 plot "The Rebels dispatch to Endor to destroy the second Empire's Death Star." ibmdb_id "tt0086190"


    alt_text

Now it is possible to get information from the hash using the movie ID. For example, if you want to get the title and rating, execute the following command:

    >> HMGET movie:11002 title rating

    1) "Star Wars: Episode V - The Empire Strikes Back"
    2) "8.7"



    And you can increment the rating of this movie using:

    HINCRBYFLOAT movie:11002 rating 0.1

    But how do you get a movie or list of movies by year of release, rating or title?

One option would be to read all the movies, check all the fields, and then return only the matching movies; needless to say, this is a really bad idea.

Nevertheless, this is where Redis developers often create custom secondary indexes using SET/SORTED SET structures that point back to the movie hash. This takes some heavy design and implementation.

This is where the Redis Search module can help, and why it was created.

    Step 12. Redis Search & Indexing

Redis Search greatly simplifies this by offering a simple and automatic way to create secondary indexes on Redis Hashes (more data structures will eventually come).

Secondary Index

With Redis Search, if you want to query on a field, you must first index that field. Let's start by indexing the following fields for our movies:

    • Title
    • Release Year
    • Rating
    • Genre

When creating an index you define:

• which data you want to index: all hashes with a key starting with movie:
• which fields in the hashes you want to index, using a Schema definition.

    Warning: Do not index all fields

    Indexes take space in memory, and must be updated when the primary data is updated. So create the index carefully and keep the definition up to date with your needs.

    Step 13. Create the Index

    >> FT.CREATE idx:movie ON hash PREFIX 1 "movie:" SCHEMA title TEXT SORTABLE release_year NUMERIC SORTABLE rating NUMERIC SORTABLE genre TAG SORTABLE

    "OK"

    alt_text

Step 14. Search the Movies

The database contains a few movies and an index; it is now possible to execute some queries.

    Query: All the movies that contains the string "war"

    alt_text

    Query: Limit the list of fields returned by the query using the RETURN parameter

The FT.SEARCH command returns a list of results starting with the number of results, then the list of elements (keys & fields).

    As you can see the movie Star Wars: Episode V - The Empire Strikes Back is found, even though you used only the word “war” to match “Wars” in the title. This is because the title has been indexed as text, so the field is tokenized and stemmed.

    Later when looking at the query syntax in more detail you will learn more about the search capabilities.

    It is also possible to limit the list of fields returned by the query using the RETURN parameter, let's run the same query, and return only the title and release_year:

    alt_text

Query: All the movies that contain the string "war" but NOT "jedi"

    Adding the string -jedi (minus) will ask the query engine not to return values that contain jedi.

    _"war -jedi" RETURN 2 title release_year_

    alt_text

Query: All the movies that contain the string "gdfather", using fuzzy search

    alt_text
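Redis Search fuzzy matching wraps the term in % signs, so a query along these lines should match "The Godfather" despite the misspelling:

 "%gdfather%" RETURN 2 title release_year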

    Query: All Thriller movies

    @genre:{Thriller}" RETURN 2 title release_year

    alt_text

    Query: All Thriller or Action movies

    @genre:{Thriller|Action}" RETURN 2 title release_year

    alt_text

Query: All the movies released between 1970 and 1980 (inclusive)

The FT.SEARCH syntax has two ways to query numeric fields. One is the FILTER parameter (the other is shown after the screenshot below):

• FILTER release_year 1970 1980 RETURN 2 title release_year

    alt_text
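The other way, using the standard Redis Search query syntax, is a numeric range inside the query string itself:

 "@release_year:[1970 1980]" RETURN 2 title release_year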

    Step 15. AGGREGATION

    Query: Number of movies by year

    "*" GROUPBY 1 @release_year REDUCE COUNT 0 AS nb_of_movies


    alt_text

    Query: Number of movies by year from the most recent to the oldest

    "*" GROUPBY 1 @release_year REDUCE COUNT 0 AS nb_of_movies SORTBY 2 @release_year DESC


    alt_text

    Redis Launchpad

    Write Your Serverless Redis function using RedisInsight Browser Tool


    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    RedisInsight has built-in support for Redis with JSON, Search, Graph, Streams, Time Series, and Triggers and Functions. Triggers and Functions enable reactive programming at the database level. It's like using lambda functions, but with a dramatically lower latency, and with much less encoding/decoding overhead.

Support for Triggers and Functions was first introduced in RedisInsight v1.5.0. RedisInsight allows you to:

    • Explore the latest executed functions and analyze the results or errors.
    • Manage registered functions and get execution summary.
    • Code, build and execute functions.

    Triggers and Functions enable developers to write and execute functions that implement data flows in Redis, while abstracting away the data’s distribution and deployment. These capabilities enable efficient data processing using multiple models in Redis with infinite programmability, while remaining simple to use in any environment.

Follow the steps below to get started with the RedisInsight browser tool for RedisGears.

    Step 1. Create Redis database

    Follow this link to get started with Redis Stack

    Step 2: Download RedisInsight

    To install RedisInsight on your local system, you need to first download the software from the Redis website.

    Click this link to access a form that allows you to select the operating system of your choice.

    My Image

    Run the installer. After the web server starts, open http://YOUR_HOST_IP:8001 and add a Redis database connection.

    Select "Connect to a Redis database" My Image

    Enter the requested details, including Name, Host (endpoint), Port, and Password. Then click “ADD REDIS DATABASE”.

    Step 3. Clone the repository

    $ git clone https://github.com/RedisGears/ImdbExample
    $ cd ImdbExample

    Step 4. Download the IMDB data

    Download the data from this link and extract it to the current directory: https://datasets.imdbws.com/title.basics.tsv.gz

    $ wget https://datasets.imdbws.com/title.basics.tsv.gz
    $ gunzip title.basics.tsv.gz

    Step 5. Execute the script

    $ python3 UploadImdb.py -H localhost -P 6379

    python3 UploadImdb.py -H 192.168.1.9 -P 6379
    /Users/ajeetraina/projects/redis-datasets/redisgears/ImdbExample/UploadImdb.py:27: DeprecationWarning: Pipeline.hmset() is deprecated. Use Pipeline.hset() instead.
    pipe.hmset(d['tconst'], d)
    done

    Step 6. Accessing RedisInsight

Choose "RedisGears" from the left menu.

    alt_text

Step 7. Add the script below:

    GB('KeysOnlyReader').map(lambda x: execute('hget', x, 'genres')).flatmap(lambda x: x.split(',')).countby().run()

    alt_text

    alt_text

    Additional References

    Redis Launchpad

    Query, Visualize and Manipulate Graphs using RedisGraph Browser Visualization Tool

    End-of-Life Notice

    Redis is phasing out RedisGraph. This blog post explains the motivation behind this decision and the implications for existing Redis customers and community members.

    End of support is scheduled for January 31, 2025.

    Beginning with Redis Stack 7.2.x-y, Redis Stack will no longer include graph capabilities (RedisGraph).


    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

If you're a Redis user who prefers to use a Graphical User Interface (GUI) for graph queries, then RedisInsight is the right tool for you. It's a 100% free desktop Redis GUI that provides easy-to-use browser tools to query, visualize, and interactively manipulate graphs. You can add new graphs, run queries, and explore the results in the GUI.

    RedisInsight supports RedisGraph and allows you to:

    • Build and execute queries
    • Navigate your graphs
    • Browse, analyze, and export results
    • Keyboard shortcuts to zoom
    • Button to reset view; center entire graph
• Zoom capability via the mouse wheel (double right-click to zoom out)
    • Ability to copy commands with a button click
    • Ability to persist nodes display choices between queries

    As a benefit, you get faster turnarounds when building your application using Redis and RedisGraph.

Follow the steps below to see how your data is connected, via the RedisInsight Browser tool.

    Step 1. Create Redis database

Follow this link to create a Redis database using Redis Enterprise Cloud with the RedisGraph module enabled.

    alt_text

    Step 2: Download RedisInsight

    To install RedisInsight on your local system, you need to first download the software from the Redis website.

    Click this link to access a form that allows you to select the operating system of your choice.

    My Image

    Run the installer. After the web server starts, open http://YOUR_HOST_IP:8001 and add a Redis database connection.

    Select "Connect to a Redis database" My Image

    Enter the requested details, including Name, Host (endpoint), Port, and Password. Then click “ADD REDIS DATABASE”.

    Step 3: Click “RedisGraph” and then “Add Graph”

    Select RedisGraph from the menu.

    alt_text

    Step 4. Create a new Graph called “Friends”

    alt_text

Step 5. Adding individuals to the graph

Let us add individuals to the graph. CREATE is used to introduce new nodes and relationships. Run the Cypher query below in the RedisInsight GUI to add a label called Person and a property called "name".

    CREATE (:Person{name:"Tom" }),  (:Person{name:"Alex" }), (:Person{name:"Susan" }), (:Person{name:"Bill" }), (:Person{name:"Jane" })

    alt_text

As we can see, one label was added; it refers to the Person label, which is shared by every node and hence created only once. Overall, 5 nodes were created, and the five "name" properties correspond to the 5 names that were added.

    Step 6: View all the individuals (nodes)

MATCH describes the relationship between queried entities, using ASCII art to represent the pattern(s) to match against. Nodes are represented by parentheses (), and relationships are represented by brackets [].

As shown below, we have added a lowercase "p" in front of our label; it is a variable we can reference later. The query returns all the nodes with the label "Person".

    MATCH (p:Person) RETURN p

    alt_text

    You can select "Graph View" on the right menu to display the graphical representation as shown below:

    alt_text

Step 7. Viewing just one individual (node)

    MATCH (p:Person {name:"Tom"}) RETURN p

    alt_text

Step 8: Visualize the relationship between the individuals

Run the query below to build a relationship between two nodes and see how the relationship flows from one node ("Tom") to another node ("Alex").

    MATCH (p1:Person {name: "Tom" }), (p2:Person {name: "Alex" }) CREATE (p1)-[:Knows]->(p2)

    The symbol “>” (greater than) shows which way the relationship flows.

    alt_text

You can view the relationship in the form of a graph as shown below:

    alt_text

    Step 9. Create and visualize the multiple relationships

Run the query below to create and visualize relationships between multiple individuals:

    MATCH (p1:Person {name: "Tom" }), (p2:Person {name: "Susan" }), (p3:Person {name: "Bill" }) CREATE (p1)-[:Knows]->(p2), (p1)-[:Knows]->(p3)

    alt_text

    Step 10. Create and visualize the relationship between two individuals (Susan and Bill)

Let us look at how to generate a graph showcasing the relationship between two individuals - Susan and Bill:

    MATCH (p1:Person {name: "Susan"}), (p2:Person {name: "Bill"}) CREATE (p1)-[:Knows]->(p2)

    alt_text

Step 11. Create and visualize the relationship between two individuals (Bill and Jane)

    MATCH (p1:Person {name: "Bill"}), (p2:Person {name: "Jane"}) CREATE (p1)-[:Knows]->(p2)

    alt_text

    alt_text

Step 12. Building a social network

This can be achieved with a "friend of friends" kind of relationship. Say Tom wanted to network with Jane: he has two contacts that know Jane - one is Susan and the other is Bill.

    alt_text

    MATCH p = (p1:Person {name: "Tom" })-[:Knows*1..3]-(p2:Person {name: "Jane"}) RETURN p

In this query, we assign a variable "p" to a graph path. We search for "Tom" as p1 and "Jane" as p2, asking for a Knows link with 1 to 3 degrees of separation.

    alt_text

    Step 13. Cleaning up the Graph

    alt_text
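If you prefer the CLI, the nodes can be deleted with a Cypher query, or the whole graph dropped with GRAPH.DELETE (the commands below assume the Friends graph created earlier):

 GRAPH.QUERY Friends "MATCH (p:Person) DELETE p"
 GRAPH.DELETE Friends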

    Importing the Bulk Graph data

Let us try to insert bulk data using Python and then explore it in the form of nodes and relationships.

Step 14. Cloning the repository

    $ git clone https://github.com/redis-developer/redis-datasets
    cd redis-datasets/redisgraph/datasets/iceandfire

    Step 15. Execute the script

    $ python3 bulk_insert.py GOT_DEMO -n data/character.csv -n data/house.csv -n data/book.csv -n data/writer.csv -r data/wrote.csv -r data/belongs.csv -h 192.168.1.9 -p 6379



    2124 nodes created with label 'b'character''
    438 nodes created with label 'b'house''
    12 nodes created with label 'b'book''
    3 nodes created with label 'b'writer''
    14 relations created for type 'b'wrote''
    2208 relations created for type 'b'belongs''
    Construction of graph 'GOT_DEMO' complete: 2577 nodes created, 2222 relations created in 0.169954 seconds


    Step 16. Run the cypher query

    GRAPH.QUERY GOT_DEMO "MATCH (w:writer)-[wrote]->(b:book) return w,b"

    alt_text

    Additional Resources

    Redis Launchpad

    Manage Redis time-series data using the RedisInsight Browser Tool


    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

If you want to visualize time-series data structures in your Redis database, download RedisInsight.

    Redis Stack enhances your experience managing time-series data with Redis. It simplifies the use of Redis for time-series use cases such as internet of things (IoT) data, stock prices, and telemetry. With Redis Time Series, you can ingest and query millions of samples and events at the speed of Redis. Advanced tooling such as downsampling and aggregation ensure a small memory footprint without impacting performance. Use a variety of queries for visualization and monitoring with built-in connectors to popular monitoring tools like Grafana, Prometheus, and Telegraf.
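As an illustration of downsampling, a compaction rule can pre-aggregate one-minute averages into a second key (the key names below are hypothetical; TS.CREATE and TS.CREATERULE are standard Redis Time Series commands):

 TS.CREATE ts:temperature:avg
 TS.CREATERULE ts:temperature ts:temperature:avg AGGREGATION avg 60000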

With the RedisInsight browser tool, you can perform the following activities:

• TS.RANGE & TS.MRANGE are supported
• Charts support milliseconds
• Ability to configure the auto-refresh interval
• Ability to submit a query with 'ctrl + enter' in single-line mode
• Display tabular as well as JSON views

    Step 1. Create Redis database

Follow this link to create a Redis Stack database that comes with support for time-series data structures.

    Step 2: Download RedisInsight

    To install RedisInsight on your local system, you need to first download the software from the Redis website.

    Click this link to access a form that allows you to select the operating system of your choice.

    My Image

    Run the installer. After the web server starts, open http://YOUR_HOST_IP:8001 and add a Redis database connection.

    Select "Connect to a Redis database" My Image

    Enter the requested details, including Name, Host (endpoint), Port, and Password. Then click “ADD REDIS DATABASE”.

    Step 3. Clone the repository

    $ git clone https://github.com/redis-developer/redis-datasets
    cd redis-datasets/redistimeseries/AirQualityUCI

Step 4. The Python script (dataloader.py)

    #!/usr/bin/env python3
    # -*- coding: utf-8 -*-

    """sample module for dataset loading into redis stack from csv file
    """

    import argparse
    import redis
    import csv
    import datetime
    import logging
    from tqdm import tqdm


def parse_dataset_row(line):
    result = False
    unix_ts = None
    carbon_monoxide = None
    temperature_c = None
    relative_humidity = None
    # check if we have 15 fields or more, and all fields have something in them
    if len(line) > 14 and sum([len(line[x]) > 0 for x in range(0, 14)]) == 14:
        str_date = line[0]
        str_time = line[1]
        # values of -200 mark missing samples in this dataset
        carbon_monoxide = (
            float(line[2].replace(",", "."))
            if (float(line[2].replace(",", ".")) > -200.0)
            else None
        )
        temperature_c = (
            float(line[12].replace(",", "."))
            if (float(line[12].replace(",", ".")) > -200.0)
            else None
        )
        relative_humidity = (
            float(line[13].replace(",", "."))
            if (float(line[13].replace(",", ".")) > -200.0)
            else None
        )
        unix_ts = int(
            datetime.datetime.strptime(
                "{0} {1}".format(str_date, str_time), "%d/%m/%Y %H.%M.%S"
            ).timestamp()
        )
        result = True

    return result, unix_ts, carbon_monoxide, temperature_c, relative_humidity


parser = argparse.ArgumentParser()
parser.add_argument("--port", type=int, help="redis instance port", default=6379)
# the password must be parsed as a string, not an int
parser.add_argument(
    "--password", type=str, help="redis instance password", default=None
)
parser.add_argument("--verbose", help="enable verbose output", action="store_true")
parser.add_argument("--host", type=str, help="redis instance host", default="127.0.0.1")
parser.add_argument(
    "--csv",
    type=str,
    help="csv file containing the dataset",
    default="./AirQualityUCI/AirQualityUCI.csv",
)
parser.add_argument(
    "--csv_delimiter", type=str, help="csv file field delimiter", default=";"
)
args = parser.parse_args()

log_level = logging.ERROR
if args.verbose is True:
    log_level = logging.INFO
logging.basicConfig(level=log_level)

# redis setup
redis_obj = redis.Redis(host=args.host, port=args.port, password=args.password)
temperature_key = "ts:temperature"
carbon_monoxide_key = "ts:carbon_monoxide"
relative_humidity_key = "ts:relative_humidity"

with open(args.csv, newline="") as csv_file:
    csv_reader = csv.reader(csv_file, delimiter=args.csv_delimiter)
    next(csv_reader, None)  # skip the headers
    for row in tqdm(csv_reader):
        (
            result,
            unix_ts,
            carbon_monoxide,
            temperature_c,
            relative_humidity,
        ) = parse_dataset_row(row)
        if result is True:
            try:
                if temperature_c is not None:
                    redis_obj.execute_command(
                        "ts.add", temperature_key, unix_ts, temperature_c
                    )
                    logging.info(
                        "ts.add {0} {1} {2}".format(
                            temperature_key, unix_ts, temperature_c
                        )
                    )
                if carbon_monoxide is not None:
                    redis_obj.execute_command(
                        "ts.add", carbon_monoxide_key, unix_ts, carbon_monoxide
                    )
                    logging.info(
                        "ts.add {0} {1} {2}".format(
                            carbon_monoxide_key, unix_ts, carbon_monoxide
                        )
                    )
                if relative_humidity is not None:
                    redis_obj.execute_command(
                        "ts.add", relative_humidity_key, unix_ts, relative_humidity
                    )
                    logging.info(
                        "ts.add {0} {1} {2}".format(
                            relative_humidity_key, unix_ts, relative_humidity
                        )
                    )
            except redis.RedisError as err:
                logging.error(err)

    Step 5. Execute the script

    $ python3 dataloader.py
    9471it [00:29, 326.33it/s]

    Step 6. Query a range across one or multiple time-series

    TS.RANGE ts:carbon_monoxide 1112596200 1112603400

    alt_text
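You can also aggregate on the fly with the AGGREGATION option of TS.RANGE; for example, hourly averages over the same interval (a bucket of 3600, since this script stores second-based timestamps):

 TS.RANGE ts:carbon_monoxide 1112596200 1112603400 AGGREGATION avg 3600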

Step 7. Displaying the JSON view

    alt_text

    Step 8. Displaying the tabular view

    alt_text

    Additional Resources

    Redis Launchpad

    Debug Redis using RedisInsight Slowlog Debugging Tool


    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    RedisInsight, a free GUI for Redis, allows you to identify and troubleshoot bottlenecks with the Slowlog analysis tool. If you are experiencing high latency and high CPU usage with Redis operations and looking for a tool for debugging and tracing your Redis database, RedisInsight Slow Log is a perfect tool for you.

    Redis Slow Log is highly effective at showing the actual processing time of each slow command. The Redis slowlog is a log of all commands which exceed a specified run time.

    note

    Network latency is not included in the measurement, just the time taken to actually execute the command. Redis Slow Log is a list of slow operations for your Redis instance.

Follow the steps below to see how the slowlog is leveraged to troubleshoot performance issues.

    Step 1. Create a Redis database

Follow https://developer.redis.com/create to install Redis and create a database.

    Step 2: Download RedisInsight

    To install RedisInsight on your local system, you need to first download the software from the Redis website.

    Click this link to access a form that allows you to select the operating system of your choice.

    My Image

    Run the installer. After the web server starts, open http://YOUR_HOST_IP:8001 and add a Redis database connection.

    Select "Connect to a Redis database" My Image

    Enter the requested details, including Name, Host (endpoint), Port, and Password. Then click “ADD REDIS DATABASE”.

    Step 3. Connect to the database using RedisInsight GUI

    alt_text

    Step 4: Click “Slowlog” and then “Configure Slowlog”

    alt_text

    Step 5. Configure Slowlog

There are two configuration settings related to slow queries:

• slowlog-log-slower-than: Sets the evaluation time of a slow query; commands that exceed this threshold are treated as slow operations and recorded in the slow query log. Its unit is microseconds (1 second equals 1,000,000 microseconds).
• slowlog-max-len: Configures the maximum number of records in the slow query log.

Note that a negative number disables the slowlog, while a value of zero forces the logging of every command. slowlog-max-len is the length of the slowlog; the minimum value is zero. When a new command is logged and the slowlog is already at its maximum length, the oldest entry is removed from the queue of logged commands to make space. The configuration can be done by editing redis.conf or, while the server is running, with the CONFIG GET and CONFIG SET commands.

The slowlog will keep the last X queries that took more than Y microseconds to run. You can set this either in redis.conf or at runtime using the CONFIG command:

     CONFIG SET slowlog-log-slower-than 500
    CONFIG SET slowlog-max-len 50

    alt_text

    Step 6. Prepare a script to add large dataset to Redis database

    To see slowlog in action, let us pick up a large dataset. Create a file called importcities.py and add the below content:

import csv
import config
from redis import Redis

# Database Connection
host = config.REDIS_CFG["host"]
port = config.REDIS_CFG["port"]
pwd = config.REDIS_CFG["password"]
redis = Redis(host=host, port=port, password=pwd, charset="utf-8", decode_responses=True)

# Import Cities
print("Importing ...")

count = 0

with open("data/worldcities.csv", 'r') as cities:
    reader = csv.DictReader(cities)
    for row in reader:
        id = row["id"]
        name = row["city_ascii"]
        lng = row["lng"]
        lat = row["lat"]
        country = row["country"]
        pop = row["population"]

        print("id = {}, name = {}, lng = {}, lat = {}".format(id, name, lng, lat))
        count += 1

        redis.hmset("ct:{}".format(id), {"_id": id, "name": name, "country": country, "population": pop})
        redis.geoadd("idx:cities", lng, lat, id)
        redis.hset("idx:city_by_name", name, id)

    Create a file called config.py as shown below:

REDIS_CFG = {
    "host": "localhost",
    "port": 6379,
    "password": ""
}

    Ensure that you provide the right host and port details.

    Execute the script:

     python3 importcities.py

    You will see the below results:

     id = 762, name = Labatt Ontario Breweries, lng = -81.2467, lat = 42.9778
    id = 915, name = Ninkasi Brewing, lng = -123.11, lat = 44.0569
    id = 930, name = Oaken Barrel Brewing, lng = -86.0901, lat = 39.615
    Import of 16790 records completed

If you want to simulate slowlogs, consider using the KEYS command. It is recommended NOT to use KEYS in your regular application code; if you're looking for a way to find keys in a subset of your keyspace, consider using SCAN or sets.

The KEYS command may ruin performance when it is executed against large databases.

Let us try to run KEYS * in the RedisInsight CLI and see if it generates a slowlog entry, as shown below:

    alt_text

Run it one more time and you will notice the following:

    alt_text

Try decreasing the execution-time threshold (to 50 ms), and you will notice that the query run below also gets logged to the slowlog:

    alt_text

    Step 7. Configuring the execution time

    Each entry in the slowlog contains four fields: a slowlog entry ID, the Unix timestamp of when the command was run, the execution time in microseconds, and an array with the command itself, along with any arguments. See the example output below:

To retrieve the slow queries, use SLOWLOG GET X, where X is the number of slow queries you want to retrieve.
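For example, to fetch the ten most recent slow commands, check how many are stored, and clear the log:

 SLOWLOG GET 10
 SLOWLOG LEN
 SLOWLOG RESET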

    alt_text

As shown above, the result displays a unique ID, the timestamp, the time taken to execute the query in microseconds, and the actual command and parameters executed. It is important to note that the slowlog is transient; there's no persistence for it, so in the case of a failover the slowlog is lost. If you are relying on a persistent slowlog, you'll want to reconsider your design choices.

If you choose "0", it forces the logging of every command, while "-1" disables the slowlog.

    alt_text

    alt_text

    note

    In a clustered database, each node can have different values for slowlog. You will need to use the configuration tool in order to configure slowlog for clustered databases.

    Redis Launchpad
Use Redis Streams Consumer Groups with RedisInsight

A Redis Stream is a Redis data type that represents a log, so you can add new information and messages in an append-only mode. Redis Streams lets you build "Kafka-like" applications, which can:

    • Create applications that publish and consume messages. Nothing extraordinary here, you could already do that with Redis Pub/Sub(Publisher/Subscriber).
    • Consume messages that are published even when the client application (consumer) is not running. This is a big difference from Redis Pub/Sub.
    • Consume messages starting from a specific point. For example, read the whole history or only new messages.

In addition, Redis Streams has the concept of a consumer group. Redis Streams consumer groups, like the similar concept in Apache Kafka, allow client applications to consume messages in a distributed fashion (multiple clients), making it easy to scale and create highly available systems.

Let's dive under the covers and see Redis Streams through the lens of RedisInsight. You will see how to use Redis to publish and consume messages using a consumer group. This is a first, basic example that uses a single consumer.
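At the command level, a consumer-group round trip looks roughly like this (the stream and consumer names are illustrative; application_1 is the group name you will see later in this tutorial):

 XGROUP CREATE mystream application_1 $ MKSTREAM
 XADD mystream * sensor_ts 1621343220975 speed 15 direction 270
 XREADGROUP GROUP application_1 consumer_1 COUNT 1 STREAMS mystream >
 XACK mystream application_1 <id-returned-by-XADD>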

    Prerequisite:

    Step 1. Run a Redis server

Redis is an open source, in-memory, key-value data store most commonly used as a primary database, cache, message broker, and queue. Redis delivers sub-millisecond response times, enabling fast and powerful real-time applications in industries such as gaming, fintech, ad-tech, social media, healthcare, and IoT. You can run a Redis database directly on your local macOS machine or in a container. If you have Docker installed on your system, type the following command:

     docker run -d -p 6379:6379 redis/redis-stack:latest

    You can connect to Redis server using the redis-cli command like this:

     redis-cli

    The above command will make a connection to the Redis server. It will then present a prompt that allows you to run Redis commands.

    note

    You can connect to Redis server using multiple clients.

    Step 2: Download RedisInsight

    To install RedisInsight on your local system, you need to first download the software from the Redis website.

    Click this link to access a form that allows you to select the operating system of your choice.

    My Image

    Run the installer. After the web server starts, open http://YOUR_HOST_IP:8001 and add a Redis database connection.

    Select "Connect to a Redis database" My Image

    Enter the requested details, including Name, Host (endpoint), Port, and Password. Then click “ADD REDIS DATABASE”.

    Step 3. Cloning the repository

     git clone https://github.com/redis-developer/redis-streams-101-java
    cd redis-streams-101-java
    mvn clean verify

Step 4. Run the producer (post a new message)


    mvn exec:java -Dexec.mainClass="com.kanibl.redis.streams.simple.RedisStreams101Producer" -Dexec.args="5"

    Downloaded from central: https://repo.maven.apache.org/maven2/org/sonatype/sisu/sisu-guice/2.1.7/sisu-guice-2.1.7-noaop.jar (472 kB at 450 kB/s)
    Downloaded from central: https://repo.maven.apache.org/maven2/org/slf4j/slf4j-api/1.7.5/slf4j-api-1.7.5.jar (26 kB at 25 kB/s)
    Downloaded from central: https://repo.maven.apache.org/maven2/commons-codec/commons-codec/1.11/commons-codec-1.11.jar (335 kB at 313 kB/s)

    Sending 5 message(s)
    May 18, 2021 1:07:00 PM io.lettuce.core.EpollProvider <clinit>
    INFO: Starting without optional epoll library
    May 18, 2021 1:07:00 PM io.lettuce.core.KqueueProvider <clinit>
    INFO: Starting without optional kqueue library
    Message 1621343220998-0 : {sensor_ts=1621343220975, loop_info=0, speed=15, direction=270} posted
    Message 1621343221009-0 : {sensor_ts=1621343221007, loop_info=1, speed=15, direction=270} posted
    Message 1621343221016-0 : {sensor_ts=1621343221011, loop_info=2, speed=15, direction=270} posted
    Message 1621343221019-0 : {sensor_ts=1621343221017, loop_info=3, speed=15, direction=270} posted
    Message 1621343221023-0 : {sensor_ts=1621343221021, loop_info=4, speed=15, direction=270} posted


    [INFO] ------------------------------------------------------------------------
    [INFO] BUILD SUCCESS
    [INFO] ------------------------------------------------------------------------
    [INFO] Total time: 9.102 s
    [INFO] Finished at: 2021-05-18T13:07:01Z
    [INFO] ------------------------------------------------------------------------

    Step 5. Run the consumer (Consume messages)

    Open a new terminal and run this command:


    mvn exec:java -Dexec.mainClass="com.kanibl.redis.streams.simple.RedisStreams101Consumer"

    The consumer will start and consume the message you just posted, and wait for any new messages.

    Step 6: Posting the new messages

    In the first terminal, let us post new entries to a Redis stream:

     mvn exec:java -Dexec.mainClass="com.kanibl.redis.streams.simple.RedisStreams101Producer" -Dexec.args="100"

    Let us try to visualize the latest messages using the RedisInsight browser tool. Make sure ‘Stream Data’ is selected, then select any one of the streams. For a specified stream, you’ll find a table showing data in that stream along with a timestamp of when each entry was added.

    alt_text

    To see the processing side of the stream, select ‘Stream Data’. You will see 105 records under the streaming data.

    alt_text

    Click on “Consumer Group” to see application_1 listed as an active consumer.

    alt_text

    RedisInsight also lets you choose which fields to display, as shown under the “View Columns” section.

    alt_text

    It also displays pending items/messages for the specific streams as shown above.

    Redis Launchpad
    - + \ No newline at end of file diff --git a/explore/redisinsight/usinghelm/index.html b/explore/redisinsight/usinghelm/index.html index 04a5efe2ad..917178bd69 100644 --- a/explore/redisinsight/usinghelm/index.html +++ b/explore/redisinsight/usinghelm/index.html @@ -4,7 +4,7 @@ Installing RedisInsight using Helm | The Home of Redis Developers - + @@ -14,7 +14,7 @@

    Installing RedisInsight using Helm


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    Helm is a package manager for Kubernetes. It is the best way to find, share, and use software built for Kubernetes. It is the K8s equivalent of yum or apt. Helm helps you manage Kubernetes applications — Helm Charts help you define, install, and upgrade even the most complex Kubernetes application. Helm is a graduated project in the CNCF and is maintained by the Helm community.

    Benefits of Helm:

    • Improves developer productivity
    • Makes application deployment easy, standardized and reusable
    • Enhances operational readiness
    • Reduces the complexity of deployments of microservices
    • Speeds up the adoption of cloud native applications

    It is possible to install RedisInsight using a Helm chart. A full-featured desktop GUI client, RedisInsight is an essential tool for Redis developers. It is a lightweight multi-platform management visualization tool that helps you design, develop, and optimize your application capabilities in a single easy-to-use environment. RedisInsight not only makes it easier to interact with your databases and manage your data, but also helps you manage Redis Cluster with ease.

    Getting Started

    Step 1. Install the Prerequisites

    Install Docker Desktop for Mac and enable Kubernetes as shown below:

    alt_text

    Step 2. Install Helm on your Mac system

     brew install helm

    Step 3. Verify that Helm is installed correctly

     helm version
    version.BuildInfo{Version:"v3.6.1",
    GitCommit:"61d8e8c4a6f95540c15c6a65f36a6dd0a45e7a2f", GitTreeState:"dirty",
    GoVersion:"go1.16.5"}

    Step 4. Download RedisInsight Helm Chart

     wget https://docs.redis.com/latest/pkgs/redisinsight-chart-0.1.0.tgz

    Step 5. Verify that Kubernetes is up and running

     kubectl get nodes
    NAME STATUS ROLES AGE VERSION
    docker-desktop Ready master 22d v1.19.7

    Step 6. Install RedisInsight using Helm chart

     helm install redisinsight redisinsight-chart-0.1.0.tgz --set service.type=NodePort

    NAME: redisinsight
    LAST DEPLOYED: Sat Jun 26 11:40:11 2021
    NAMESPACE: default
    STATUS: deployed
    REVISION: 1
    NOTES:
    1. Get the application URL by running these commands:
    export NODE_PORT=$(kubectl get --namespace default -o
    jsonpath="{.spec.ports[0].nodePort}" services redisinsight-redisinsight-chart)
    export NODE_IP=$(kubectl get nodes --namespace default -o
    jsonpath="{.items[0].status.addresses[0].address}")
    echo http://$NODE_IP:$NODE_PORT

    Step 7. Get the application URL

     export NODE_PORT=$(kubectl get --namespace default -o jsonpath="{.spec.ports[0].nodePort}" services redisinsight-redisinsight-chart)
    export NODE_IP=$(kubectl get nodes --namespace default -o jsonpath="{.items[0].status.addresses[0].address}")

    Step 8. Listing the IP address

     echo http://$NODE_IP:$NODE_PORT
    http://192.168.65.4:30269

    Step 9. Listing the Helm Chart

     helm list
    NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
    redisinsight default 1 2021-06-26 11:40:11.82793 +0530 IST deployed redisinsight-chart-0.1.0

    images

    Step 10. Listing the Redisinsight Pods

     kubectl get po
    NAME READY STATUS RESTARTS AGE
    fortune 2/2 Running 8 22d
    redisinsight-redisinsight-chart-857b486d8f-w9xpv 1/1 Running 0 15m

    Step 11. Accessing RedisInsight

    images images

    References

    Redis Launchpad
    - + \ No newline at end of file diff --git a/explore/redisinsightv2/browser/index.html b/explore/redisinsightv2/browser/index.html index 008508bc28..af92cf2394 100644 --- a/explore/redisinsightv2/browser/index.html +++ b/explore/redisinsightv2/browser/index.html @@ -4,7 +4,7 @@ Visualize Redis Database keys using the RedisInsight Browser Tool | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Visualize Redis Database keys using the RedisInsight Browser Tool


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    My Image

    RedisInsight is a 100% free Redis GUI that allows you to visualize, monitor, and optimize while developing your applications with Redis. It provides an intuitive and efficient GUI for Redis, allowing developers like you to interact with your databases and manage your data. RedisInsight v2.0 now incorporates a completely new tech stack based on the popular Electron and Elastic UI frameworks. You can run the application locally along with your favorite IDE, and it remains cross-platform, supported on Linux, Windows, and MacOS.

    What's New in the RedisInsight v2.0 Browser Tool?

    RedisInsight Browser lets you explore keys in your Redis server. You can add, edit, and delete a key. You can even update the key expiry and copy the key name to be used in different parts of the application. Below is the list of features available under the browser tool:

    • Browse, filter and visualize key-value Redis data structures
    • Visual cues per data type
    • Quick view of size and ttl in the main browser view
    • Ability to filter by pattern and/or data type
    • Ability to change the number of keys to scan through during filtering
    • CRUD support for Lists, Hashes, Strings, Sets, Sorted Sets
    • Search within the data structure (except for Strings)
    • CRUD support for Redis JSON

    In order to understand the capabilities of the browser tool, let us take a simple example and demonstrate each of the browser tool's options:

    Step 1. Install RedisInsight

    To use RedisInsight on a local Mac, you can install Redis Stack by running the following commands:

    First, tap the Redis Stack Homebrew tap and then run brew install as shown below:

     brew tap redis-stack/redis-stack
    brew install --cask redis-stack

    This will install all Redis and Redis Stack binaries. How you run these binaries depends on whether you already have Redis installed on your system.

     ==> Installing Cask redis-stack-redisinsight
    ==> Moving App 'RedisInsight-preview.app' to '/Applications/RedisInsight-preview.app'
    🍺 redis-stack-redisinsight was successfully installed!
    ==> Installing Cask redis-stack
    🍺 redis-stack was successfully installed!
    tip

    If this is the first time you’ve installed Redis on your system, then all Redis Stack binaries will be installed and accessible on your path. On M1 Macs, this assumes that /opt/homebrew/bin is in your path. On Intel-based Macs, /usr/local/bin should be in the $PATH.

    To check this, run:

     echo $PATH

    Then, confirm that the output contains /opt/homebrew/bin (M1 Mac) or /usr/local/bin (Intel Mac). If these directories are not in the output, see the “Existing Redis installation” instructions below.

    Step 2. Start Redis Stack Server

    You can now start Redis Stack Server as follows:

     redis-stack-server

    Existing Redis installation

    If you have an existing Redis installation on your system, then you’ll need to modify your path to ensure that you’re using the latest Redis Stack binaries.

    Open the file ~/.bashrc or ~/.zshrc (depending on your shell), and add the following line.

      export PATH=/usr/local/Caskroom/redis-stack-server/<VERSION>/bin:$PATH

    Go to Applications and click "RedisInsight-v2" to bring up the Redis Desktop GUI tool.

    Step 3. Add Redis database

    access redisinsight

    Step 4. Enter Redis database details

    Add the local Redis database endpoint and port.

    access redisinsight

    Step 5: Open "Browser Tool"

    Click on the "Key" icon on the left sidebar to open up the browser tool.

    alt_text

    Step 6: Importing keys

    Let us import a user database (6k keys). This dataset contains users stored as Redis Hashes.

    Users

    The user hashes contain the following fields:

    • user:id : The key of the hash.
    • first_name : First Name.
    • last_name : Last name.
    • email : email address.
    • gender : Gender (male/female).
    • ip_address : IP address.
    • country : Country Name.
    • country_code : Country Code.
    • city : City of the user.
    • longitude : Longitude of the user.
    • latitude : Latitude of the user.
    • last_login : Epoch time of the last login.

    Step 7: Cloning the repository

    Open up the CLI terminal and run the following command:

     git clone https://github.com/redis-developer/redis-datasets
    cd redis-datasets/user-database

    Importing the user database:

     redis-cli -h localhost -p 6379 < ./import_users.redis

    Refresh the keys view by clicking as shown below:

    alt_text

    You can get a real-time view of the data in your Redis database as shown below:

    Select any key in the keys view and the key's value gets displayed on the right-hand side, including fields and values.

    Step 8. Modifying a key

    alt_text

    Enter key name, field and value.

    Step 9: Using CLI

    RedisInsight CLI lets you run commands against a Redis server. You don’t need to remember the syntax - the integrated help shows you all the arguments and validates your command as you type.

    alt_text

    Further References

    Redis Launchpad
    - + \ No newline at end of file diff --git a/explore/redisinsightv2/getting-started/index.html b/explore/redisinsightv2/getting-started/index.html index 8a72563b71..015f5bb75a 100644 --- a/explore/redisinsightv2/getting-started/index.html +++ b/explore/redisinsightv2/getting-started/index.html @@ -4,7 +4,7 @@ Getting Started with RedisInsight | The Home of Redis Developers - + @@ -14,7 +14,7 @@

    Getting Started with RedisInsight


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    My Image

    RedisInsight is a visual tool that lets you do both GUI- and CLI-based interactions with your Redis database, and so much more when developing your Redis based application. It is a fully-featured pure Desktop GUI client that provides capabilities to design, develop and optimize your Redis application. It works with any cloud provider as long as you run it on a host with network access to your cloud-based Redis server. It makes it easy to discover cloud databases and configure connection details with a single click. It allows you to automatically add Redis Enterprise Software and Redis Enterprise Cloud databases.

    What's New in RedisInsight v2.0?

    RedisInsight v2.0 is a complete product rewrite based on a new tech stack comprising Electron, Monaco Editor, and Node.js. This version contains a number of must-have and most-used capabilities from previous releases, plus a number of differentiators and delighters. You can run the application locally along with your favorite IDE, and it remains cross-platform, supported on Linux, Windows, and MacOS.

    Starting with the RedisInsight v2.0 release, the code is open source and publicly available on GitHub. Below is the list of new features introduced with this latest release:

    • Workbench - An advanced command line interface with intelligent command auto-complete and complex data visualizations
    • Ability to write and render your own data visualizations within Workbench
    • Built-in click-through Redis Guides available
    • Support for Light and Dark themes
    • Enhanced user experience with Browser

    Getting Started

    Using MacOS

    To install RedisInsight on MacOS, the easiest way is to install Redis Stack. Make sure that you have Homebrew installed before starting on the installation instructions below.

    Step 1. Install Redis Stack using Homebrew

    First, tap the Redis Stack Homebrew tap and then run brew install as shown below:

     brew tap redis-stack/redis-stack
    brew install --cask redis-stack

    This will install all Redis and Redis Stack binaries. How you run these binaries depends on whether you already have Redis installed on your system.

     ==> Installing Cask redis-stack-redisinsight
    ==> Moving App 'RedisInsight-preview.app' to '/Applications/RedisInsight-preview.app'
    🍺 redis-stack-redisinsight was successfully installed!
    ==> Installing Cask redis-stack
    🍺 redis-stack was successfully installed!
    tip

    If this is the first time you’ve installed Redis on your system, then all Redis Stack binaries will be installed and accessible on your path. On M1 Macs, this assumes that /opt/homebrew/bin is in your path. On Intel-based Macs, /usr/local/bin should be in the path.

    To check this, run:

     echo $PATH

    Then, confirm that the output contains /opt/homebrew/bin (M1 Mac) or /usr/local/bin (Intel Mac). If these directories are not in the output, see the “Existing Redis installation” instructions below.

    Start Redis Stack Server

    You can now start Redis Stack Server as follows:

     redis-stack-server

    Existing Redis installation

    If you have an existing Redis installation on your system, then you’ll need to modify your path to ensure that you’re using the latest Redis Stack binaries.

    Open the file ~/.bashrc or ~/.zshrc (depending on your shell), and add the following line.

      export PATH=/usr/local/Caskroom/redis-stack-server/<VERSION>/bin:$PATH

    Go to Applications and click "RedisInsight-v2" to bring up the Redis Desktop GUI tool.

    Step 2. Add Redis database

    access redisinsight

    Step 3. Enter Redis database details

    Add the local Redis database endpoint and port.

    access redisinsight

    Step 4. Redis for time series

    Redis Stack provides you with a native time series data structure. Let's see how a time series might be useful in our bike shop.

    As we have multiple physical shops too, alongside our online shop, it could be helpful to have an overview of the sales volume. We will create one time series per shop tracking the total amount of all sales. In addition, we will mark the time series with the appropriate region label, east or west. This kind of representation will allow us to easily query bike sales performance per certain time periods, per shop, per region or across all shops.

    Click "Guides" icon(just below the key) in the left sidebar and choose "Redis for the time series" for this demonstration. i

    redis for timeseries

    Step 5. Create time series per shop

     TS.CREATE bike_sales_1 DUPLICATE_POLICY SUM LABELS region east compacted no
    TS.CREATE bike_sales_2 DUPLICATE_POLICY SUM LABELS region east compacted no
    TS.CREATE bike_sales_3 DUPLICATE_POLICY SUM LABELS region west compacted no
    TS.CREATE bike_sales_4 DUPLICATE_POLICY SUM LABELS region west compacted no
    TS.CREATE bike_sales_5 DUPLICATE_POLICY SUM LABELS region west compacted no

    As shown in the preceding commands, we make the shop id (1,2,3,4,5) a part of the time series name. You might also notice the DUPLICATE_POLICY SUM argument; this describes what should be done when two events in the same time series share the same timestamp: in this case, it would mean that two sales happened at exactly the same time, so the resulting value should be a sum of the two sales amounts.
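
    Here is a minimal sketch of DUPLICATE_POLICY SUM in action (the timestamp below is made up for illustration):

     TS.ADD bike_sales_1 1621343220000 100
     TS.ADD bike_sales_1 1621343220000 50
     TS.RANGE bike_sales_1 1621343220000 1621343220000
     1) 1) (integer) 1621343220000
        2) "150"

    Because both samples share a timestamp, the stored value is their sum.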

    Since the metrics are collected with a millisecond timestamp, we can compact our time series into sales per hour:

    create time series per shop
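
    As a sketch (the destination key name bike_sales_1_per_hour is an assumption, not taken from the guide), an hourly compaction rule can be created like this; note that the destination time series must exist before the rule is added:

     TS.CREATE bike_sales_1_per_hour LABELS region east compacted yes
     TS.CREATERULE bike_sales_1 bike_sales_1_per_hour AGGREGATION sum 3600000

    Here 3600000 is the bucket size in milliseconds, i.e. one hour.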

    Step 6. Running the query

    execute the query

    Step 7. Time series compaction

    Redis Stack supports downsampling with the following aggregations: avg, sum, min, max, range, count, first and last. If you want to keep all of your raw data points indefinitely, your data set grows linearly over time. However, if your use case allows you to have less fine-grained data further back in time, downsampling can be applied. This allows you to keep fewer historical data points by aggregating raw data for a given time window using a given aggregation function.

    Example:

     TS.CREATERULE bike_sales_5 bike_sales_5_per_day AGGREGATION sum 86400000

    time series compaction

    Overview of RedisInsight Workbench

    With the new RedisInsight v2.0, a Workbench has been introduced. Workbench is an advanced command-line interface that lets you run commands against your Redis server. The Workbench editor allows comments, multi-line formatting, and multi-command execution. It offers intelligent Redis command auto-completion and syntax highlighting, with support for Search, JSON, Graph, Time Series, Triggers and Functions, and Probabilistic data structures. It also allows rendering custom data visualizations per Redis command using externally developed plugins.

    You can locate the Workbench on the left sidebar of the RedisInsight dashboard UI. It displays built-in click-through guides for Redis capabilities. You can also see a number of metrics always on display within the database workspace. These metrics are updated every 5 seconds and include CPU, number of keys, commands/sec, network input, network output, total memory, and number of connected clients.

    My Image

    Check out the reference section to learn more about the new RedisInsight v2.0 features.

    Accessing the CLI

    The new RedisInsight v2.0 comes with a command-line interface with enhanced type-ahead command help. It includes an embedded command helper where you can filter and search for Redis commands. Click on the "CLI" option to open the CLI window:

    My Image

    Try executing Redis commands as shown below:

    My Image

    RedisInsight allows you to browse, filter, and visualize key-value Redis data structures. It supports CRUD operations for Lists, Hashes, Strings, Sets, Sorted Sets, etc. In our next tutorial, we will explore the browser tool in more detail.

    References

    - + \ No newline at end of file diff --git a/explore/redisinsightv2/index.html b/explore/redisinsightv2/index.html index 1feb89d8cb..c563e0a664 100644 --- a/explore/redisinsightv2/index.html +++ b/explore/redisinsightv2/index.html @@ -4,7 +4,7 @@ RedisInsight Developer Hub for Redis Interactive Tutorials | The Home of Redis Developers - + @@ -12,7 +12,7 @@
    - + \ No newline at end of file diff --git a/explore/redisinsightv2/profiler/index.html b/explore/redisinsightv2/profiler/index.html index ae42f6415c..0b15398e6b 100644 --- a/explore/redisinsightv2/profiler/index.html +++ b/explore/redisinsightv2/profiler/index.html @@ -4,7 +4,7 @@ RedisInsight Profiler Tool - Analyze Your Redis Commands Using Redis Monitor Command | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    RedisInsight Profiler Tool - Analyze Your Redis Commands Using Redis Monitor Command


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    alt_text

    Last week the maintenance release of RedisInsight Preview 2.0 (v2.0.4) was introduced by the RedisInsight Team. RedisInsight v2.0 is a complete product rewrite based on a new tech stack composed of Electron, Elastic UI, Monaco Editor, and Node.js. This newer preview build added a dedicated RedisInsight Profiler UI for the first time. The profiler uses the MONITOR command to analyze every command sent to the Redis instance in real time.

    RedisInsight Profiler analyzes the Redis commands that are being run on the Redis server in real time. The tool provides you with detailed information about the number of commands processed, commands/second, and number of connected clients. It also gives information about top prefixes, top keys, and top commands.

    It basically runs the Redis MONITOR command and generates a summarized view. MONITOR is a debugging command that streams back every command processed by the Redis server. It can help in understanding what is happening to the database. This command can both be used via redis-cli and via telnet. All the commands sent to the Redis instance are monitored for the duration of the profiling. The ability to see all the requests processed by the server is useful in order to spot bugs in an application, both when using Redis as a database and as a distributed caching system.
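
    If you want to watch the raw stream that the profiler summarizes, you can also run MONITOR directly from redis-cli (the output below is illustrative, not captured from this setup):

     redis-cli MONITOR
     OK
     1644774736.123456 [0 127.0.0.1:52431] "TS.ADD" "temperature" "1644774736" "36.50"

    Keep in mind that MONITOR adds overhead to the server, so use it sparingly in production.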

    Follow the below instructions to test drive RedisInsight Profiler tool introduced under the RedisInsight v2.0.4 release:

    Step 1. Create Redis database with Redis Time Series module enabled

    alt_text

    Visit https://developer.redis.com/create/rediscloud and create a Redis database. Follow these steps to enable Redis Time Series module on Redis Enterprise Cloud.

    Step 2. Create database

    Click “Create Database”. Enter database name and select Redis Time Series.

    alt_text

    Once the database is created, you will see the endpoint URL that gets generated. Save it for future reference.

    alt_text

    Step 3. Download RedisInsight

    To install RedisInsight on your local system, you need to first download the software from the Redis website.

    Click this link to access a form that allows you to select the operating system of your choice.

    alt_text

    Execute the installer. Once it is installed on your computer, click on the RedisInsight icon to open the tool.

    alt_text

    Step 4. Connect to Redis Enterprise Cloud Database

    alt_text

    alt_text

    As the database is empty, you won’t be able to see any key.

    alt_text

    Step 5. Execute the script

    Below is the script that creates a time series representing sensor temperature measurements. After you create the time series, you can send temperature measurements. Then you can query the data for a time range, with an optional aggregation rule.

    from redistimeseries.client import Client as RedisTimeSeries
    import time
    import sys
    import site
    import datetime
    import random

    print(' \n '.join(sys.path))

    # Connect to the cloud database (replace the password with your own)
    redis = RedisTimeSeries(host='redis-16169.c212.ap-south-1-1.ec2.cloud.redislabs.com', port=16169, password='XXXX')

    # redis.flushdb()
    key = 'temperature'

    def create(key):
        # Create the time series with a 30-second retention and two labels
        print('\n Create new time series: %s' % str(key))
        #redis.create(key,retentionSecs=30,labels={'sensor_id' : 2,'area_id' : 32})
        redis.create(key, retention_msecs=30000, labels={'sensor_id': 2, 'area_id': 32})
        print('')

    def store(key, interval):
        # Append one random measurement per second for `interval` seconds
        print("\n Append new value to time series:\n")
        begin_time = int(time.time())
        for i in range(interval):
            timestamp = int(time.time())
            value = round(random.uniform(0.0, 100.0), 2)
            timestamp_strftime = datetime.datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')
            sys.stdout.write(' %s : %.2f \n' % (timestamp_strftime, value))
            sys.stdout.flush()
            #redis.add(key,timestamp,value,retentionSecs=30, labels={'sensor_id' : 2,'area_id' : 32})
            redis.add(key, timestamp, value, retention_msecs=30000, labels={'sensor_id': 2, 'area_id': 32})
            time.sleep(1)
        end_time = int(time.time() - 1)
        return (begin_time, end_time)

    def query(key, begin_time, end_time):
        # Query the series over [begin_time, end_time] with 1-second buckets
        begin_time_datetime = datetime.datetime.fromtimestamp(begin_time).strftime('%Y-%m-%d %H:%M:%S')
        end_time_datetime = datetime.datetime.fromtimestamp(end_time).strftime('%Y-%m-%d %H:%M:%S')
        print("\n Query time series in range:\n\n %s to %s \n" % (begin_time_datetime, end_time_datetime))
        try:
            #for record in redis.range(key,begin_time, end_time,bucketSizeSeconds=1):
            for record in redis.range(key, begin_time, end_time, bucket_size_msec=1000):
                timestamp = datetime.datetime.fromtimestamp(record[0]).strftime('%Y-%m-%d %H:%M:%S')
                value = round(float(record[1]), 2)
                print(' %s : %.2f ' % (timestamp, value))
        except Exception as e:
            print("\n Error: %s" % e)
        print('')

    def print_info():
        # Print labels and last timestamp for every key in the database
        print('\n Query time series info:\n')
        for key in redis.keys('*'):
            print(' key=%s' % (key.decode('utf8')))
            info = redis.info(key)
            sensor = info.labels['sensor_id']
            print(" sensor_id=%s " % str(sensor))
            area = info.labels['area_id']
            print(" area_id=%s " % str(area))
            last_time_stamp_seconds = info.__dict__['lastTimeStamp']
            last_time_stamp = datetime.datetime.fromtimestamp(last_time_stamp_seconds).strftime('%Y-%m-%d %H:%M:%S')
            print(" last_time_stamp=%s " % str(last_time_stamp))
        print('')

    def print_loop(loops):
        # Sleep for `loops` seconds, printing a dot each second
        for i in range(loops):
            if i == 0:
                sys.stdout.write(' ')
            sys.stdout.write('.')
            sys.stdout.flush()
            time.sleep(1)
        print('')

    create(key)
    interval = 10
    begin_time, end_time = store(key, interval)
    time.sleep(1)
    query(key, begin_time, end_time)
    query(key, begin_time + 4, end_time - 5)
    print_info()
    print('\n Set expire key: %s' % str(key))
    redis.expire(key, 30)
    loops = 30
    print_loop(loops)
    query(key, begin_time, end_time)
    time.sleep(1)
    interval = 1
    create(key)
    begin_time, end_time = store(key, interval)
    time.sleep(1)
    query(key, begin_time, end_time)
    time.sleep(1)
    print('\n Delete key: %s' % str(key))
    redis.delete(key)
    time.sleep(1)

    query(key, begin_time, end_time)

    print('')

    Results:

    Create new time series: temperature


    Append new value to time series:

    2022-02-13 17:52:16 : 36.50
    2022-02-13 17:52:17 : 84.56
    2022-02-13 17:52:18 : 25.90
    2022-02-13 17:52:19 : 29.24
    2022-02-13 17:52:20 : 35.75
    2022-02-13 17:52:21 : 78.14
    2022-02-13 17:52:22 : 28.77
    2022-02-13 17:52:23 : 26.37
    2022-02-13 17:52:24 : 74.93
    2022-02-13 17:52:25 : 46.61

    Query time series in range:

    2022-02-13 17:52:16 to 2022-02-13 17:52:25

    2022-02-13 17:52:16 : 36.50
    2022-02-13 17:52:17 : 84.56
    2022-02-13 17:52:18 : 25.90
    2022-02-13 17:52:19 : 29.24
    2022-02-13 17:52:20 : 35.75
    2022-02-13 17:52:21 : 78.14
    2022-02-13 17:52:22 : 28.77
    2022-02-13 17:52:23 : 26.37
    2022-02-13 17:52:24 : 74.93
    2022-02-13 17:52:25 : 46.61


    Query time series in range:

    2022-02-13 17:52:20 to 2022-02-13 17:52:20

    2022-02-13 17:52:20 : 35.75


    Step 6. Running Profiler

    The new RedisInsight Browser tool allows you to explore keys in your Redis server. You can add, edit, and delete a key. It also helps you to browse, filter, and visualize key-value Redis data structures.

    Open Browser tool and select TS from the drop-down menu as shown below:

    alt_text

    It will display temperature as a key. Choose the “Profiler” option and click on “Start Profiler.”

    alt_text

    Soon you will be able to see the detailed information about the number of commands processed, commands/second, and number of connected clients. It also gives information about top prefixes, top keys, and top commands.

    alt_text

    References:

    - + \ No newline at end of file diff --git a/explore/redisinsightv2/redisearch/index.html b/explore/redisinsightv2/redisearch/index.html index 739e04c686..aac65b6de9 100644 --- a/explore/redisinsightv2/redisearch/index.html +++ b/explore/redisinsightv2/redisearch/index.html @@ -4,7 +4,7 @@ Perform Database Search and Analytics using Redis Stack and RedisInsight | The Home of Redis Developers - + @@ -13,7 +13,7 @@

    Perform Database Search and Analytics using Redis Stack and RedisInsight


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    A full-featured pure desktop GUI client, RedisInsight supports Redis Stack. Redis Stack provides a powerful indexing, querying, and full-text search engine for Redis. With RedisInsight, the following functionalities are possible:

    MyImage

    • Multi-line for building queries
    • Ability to submit query with ‘ctrl + enter’ in single line mode
    • Better handling of long index names in index selector dropdown
    • Supports Aggregation
    • Supports Fuzzy logic
    • Supports simple and complex conditions
    • Sorting
    • Pagination
    • Counting

    Redis Stack allows you to quickly create indexes on datasets (stored as Redis Hashes or as JSON documents), and uses an incremental indexing approach for rapid index creation and deletion. The indexes let you query your data at lightning speed, perform complex aggregations, and filter by properties, numeric ranges, and geographical distance.

    Step 1. Create a Redis Database

    Follow this link to create a Redis database using a Docker container that comes with Search.

    Step 2: Download RedisInsight

    To install RedisInsight on your local system, you need to first download the software from the Redis website.

    Click this link to access a form that allows you to select the operating system of your choice.

    My Image

    Run the installer. Once the installation completes, you should be able to connect to a Redis database.

    Select "Connect to a Redis database".

    My Image

    Enter the requested details, including Name, Host (endpoint), Port, and Password. Then click “ADD REDIS DATABASE”.

    alt_text

    Step 3. Movie Sample Database

    In this section, you will use a simple dataset describing movies; for now, all records are in English. You will learn more about other languages in another tutorial.

    A movie is represented by the following attributes:

    • movie_id : The unique ID of the movie, internal to this database
    • title : The title of the movie.
    • plot : A summary of the movie.
    • genre : The genre of the movie, for now a movie will only have a single genre.
    • release_year : The year the movie was released as a numerical value.
    • rating : A numeric value representing the public's rating for this movie.
    • votes : Number of votes.
    • poster : Link to the movie poster.
    • imdb_id : id of the movie in the IMDB database.

    Key and Data Structure

    As a Redis developer, one of the first things to look at when building your application is to define the structure of the key and data (data design/data modeling).

    A common strategy for Redis is to use specific patterns when naming keys. For example, in this application, where the database will probably deal with various business objects (movies, actors, theaters, users, ...), we can use the following pattern:

    • business_object:key

    For example:

    • movie:001 for the movie with the id 001
    • user:001 the user with the id 001

    and for the movie's information you should use a Redis Hash.

    A Redis Hash allows the application to structure all the movie attributes in individual fields; also Redis Stack will index the fields based on the index definition.

    Step 4. Insert Movies

    It is now time to add some data to your database. Let's insert a few movies, using redis-cli or RedisInsight.

    Once you are connected to your Redis instance run the following commands:

    HSET movie:11002 title "Star Wars: Episode V - The Empire Strikes Back" plot "After the Rebels are brutally overpowered by the Empire on the ice planet Hoth, Luke Skywalker begins Jedi training with Yoda, while his friends are pursued by Darth Vader and a bounty hunter named Boba Fett all over the galaxy." release_year 1980 genre "Action" rating 8.7 votes 1127635 imdb_id tt0080684
    HSET movie:11003 title "The Godfather" plot "The aging patriarch of an organized crime dynasty transfers control of his clandestine empire to his reluctant son." release_year 1972 genre "Drama" rating 9.2 votes 1563839 imdb_id tt0068646
    HSET movie:11004 title "Heat" plot "A group of professional bank robbers start to feel the heat from police when they unknowingly leave a clue at their latest heist." release_year 1995 genre "Thriller" rating 8.2 votes 559490 imdb_id tt0113277
    HSET "movie:11005" title "Star Wars: Episode VI - Return of the Jedi" genre "Action" votes 906260 rating 8.3 release_year 1983  plot "The Rebels dispatch to Endor to destroy the second Empire's Death Star." ibmdb_id "tt0086190"

    Now it is possible to get information from the hash using the movie ID. For example if you want to get the title, and rating execute the following command:

    >> HMGET movie:11002 title rating

    Result:

    1) "Star Wars: Episode V - The Empire Strikes Back"
    2) "8.7"

    Increment the Movie Rating

    You can increment the rating of this movie using:

    HINCRBYFLOAT movie:11002 rating 0.1
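
    HINCRBYFLOAT replies with the new value, so in redis-cli you should see:

     >> HINCRBYFLOAT movie:11002 rating 0.1
     "8.8"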

    Here's a quick screenshot of the results shown in RedisInsight:

    MyImage

    But how do you get a movie or list of movies by year of release, rating or title?

    One option would be to read all the movies, check all fields, and then return only the matching movies; needless to say, this is a really bad idea. Nevertheless, this is where Redis developers have traditionally created custom secondary indexes using SET/SORTED SET structures that point back to the movie hash. This requires some heavy design and implementation.
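
    For instance, a hand-rolled secondary index on release year (a hypothetical sketch of the manual approach being discouraged here) might look like this:

     ZADD movies:by:release_year 1980 movie:11002
     ZADD movies:by:release_year 1972 movie:11003
     ZRANGEBYSCORE movies:by:release_year 1970 1980

    Every write to a movie hash would then have to update this sorted set as well, which is exactly the bookkeeping Redis Stack removes.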

    This is where the Redis Stack can help, and part of why it was created.

    Redis Stack greatly simplifies this by offering a simple and automatic way to create secondary indices on Redis Hashes (support for more data structures will eventually come).

    Step 5. Secondary Index

    Using Redis Stack, if you want to query on a field, you must first index that field. Let's start by indexing the following fields for our movies:

    • Title
    • Release Year
    • Rating
    • Genre

    When creating an index you define:

    • which data you want to index: all hashes with a key starting with movie:
    • which fields in the hashes you want to index using a Schema definition.

    Warning: Do not index all fields

    Indexes take space in memory, and must be updated when the primary data is updated. So create the index carefully and keep the definition up to date with your needs.

    Step 6. Create the Index

     FT.CREATE idx:movie ON hash PREFIX 1 "movie:" SCHEMA title TEXT SORTABLE release_year NUMERIC SORTABLE rating NUMERIC SORTABLE genre TAG SORTABLE

    The database now contains a few movies and an index, so it is possible to execute some queries.

    Step 7. Search the Movies

    Query: All the movies that contains the string "war"

    FT.SEARCH idx:movie "war"

    Result:

    1) 2
    2) "movie:11005"
    3) 1) "title"
    2) "Star Wars: Episode VI - Return of the Jedi"
    3) "votes"
    4) "906260"
    5) "plot"
    6) "The Rebels dispatch to Endor to destroy the second Empire's Death Star."
    7) "rating"
    8) "8.3"
    9) "release_year"
    10) "1983"
    11) "ibmdb_id"
    12) "tt0086190"
    13) "genre"
    14) "Action"
    4) "movie:11002"
    5) 1) "title"
    2) "Star Wars: Episode V - The Empire Strikes Back"
    3) "votes"
    4) "1127635"
    5) "plot"
    6) "After the Rebels are brutally overpowered by the Empire on the ice planet Hoth, Luke Skywalker begins Jedi training with Yoda, while his friends are pursued by Darth Vader and a bounty hunter named Boba Fett all over the galaxy."
    7) "rating"
    8) "8.8"
    9) "release_year"
    10) "1980"
    11) "genre"
    12) "Action"
    13) "imdb_id"
    14) "tt0080684"
    >

    Query: Limit the list of fields returned by the query using the RETURN parameter

    The FT.SEARCH commands returns a list of results starting with the number of results, then the list of elements (keys & fields).

    FT.SEARCH idx:movie "war" RETURN 2 title release_year

    Result:

    1) 2
    2) "movie:11005"
    3) 1) "title"
    2) "Star Wars: Episode VI - Return of the Jedi"
    3) "release_year"
    4) "1983"
    4) "movie:11002"
    5) 1) "title"
    2) "Star Wars: Episode V - The Empire Strikes Back"
    3) "release_year"
    4) "1980"
    >

    As you can see the movie Star Wars: Episode V - The Empire Strikes Back is found, even though you used only the word “war” to match “Wars” in the title. This is because the title has been indexed as text, so the field is tokenized and stemmed.

    Later when looking at the query syntax in more detail you will learn more about the search capabilities.

    As shown above, it is also possible to limit the list of fields returned by the query using the RETURN parameter; the same query was run again, returning only the title and release_year.

    Query: All the movies that contain the string "war" but NOT the "jedi" one

    Adding the string -Jedi (minus) will ask the query engine not to return values that contain jedi.

    FT.SEARCH idx:movie "war -Jedi" RETURN 2 title release_year

    Result:

    1) 1
    2) "movie:11002"
    3) 1) "title"
    2) "Star Wars: Episode V - The Empire Strikes Back"
    3) "release_year"
    4) "1980"

    All the movies that contains the string "gdfather using fuzzy search"

    FT.SEARCH "idx:movie" " %gdfather% " RETURN 2 title release_year

    Result:

    1) 1
    2) "movie:11003"
    3) 1) "title"
    2) "The Godfather"
    3) "release_year"
    4) "1972"

    Query: All Thriller movies

    FT.SEARCH "idx:movie" "@genre:{Thriller}" RETURN 2 title release_year

    Result:

    1) 1
    2) "movie:11004"
    3) 1) "title"
    2) "Heat"
    3) "release_year"
    4) "1995"

    Query: All Thriller or Action movies

    FT.SEARCH "idx:movie" "@genre:{Thriller|Action}" RETURN 2 title release_year

    Result:

    1) 3
    2) "movie:11004"
    3) 1) "title"
    2) "Heat"
    3) "release_year"
    4) "1995"
    4) "movie:11005"
    5) 1) "title"
    2) "Star Wars: Episode VI - Return of the Jedi"
    3) "release_year"
    4) "1983"
    6) "movie:11002"
    7) 1) "title"
    2) "Star Wars: Episode V - The Empire Strikes Back"
    3) "release_year"
    4) "1980"

    Query: All the movies released between 1970 and 1980 (inclusive)

    The FT.SEARCH syntax has two ways to query numeric fields: with the FILTER parameter, or with a numeric range inside the query string (a sketch of the second form follows the result below).

    • using the FILTER parameter

    FT.SEARCH "idx:movie" "@genre:{Thriller|Action}" FILTER release_year 1970 1980 RETURN 2 title release_year

    Result:

    1) 1
    2) "movie:11002"
    3) 1) "title"
    2) "Star Wars: Episode V - The Empire Strikes Back"
    3) "release_year"
    4) "1980"

    Step 8. Aggregation

    Query: Number of movies by year

    FT.AGGREGATE "idx:movie" "*" GROUPBY 1 @release_year REDUCE COUNT 0 AS nb_of_movies

    Result:

    1) 4
    2) 1) "release_year"
    2) "1983"
    3) "nb_of_movies"
    4) "1"
    3) 1) "release_year"
    2) "1995"
    3) "nb_of_movies"
    4) "1"
    4) 1) "release_year"
    2) "1980"
    3) "nb_of_movies"
    4) "1"
    5) 1) "release_year"
    2) "1972"
    3) "nb_of_movies"
    4) "1"

    Query: Number of movies by year from the most recent to the oldest

    FT.AGGREGATE "idx:movie" "*" GROUPBY 1 @release_year REDUCE COUNT 0 AS nb_of_movies SORTBY 2 @release_year DESC

    Result:

    1) 4
    2) 1) "release_year"
    2) "1995"
    3) "nb_of_movies"
    4) "1"
    3) 1) "release_year"
    2) "1983"
    3) "nb_of_movies"
    4) "1"
    4) 1) "release_year"
    2) "1980"
    3) "nb_of_movies"
    4) "1"
    5) 1) "release_year"
    2) "1972"
    3) "nb_of_movies"
    4) "1"

    Redis Launchpad
    - + \ No newline at end of file diff --git a/explore/redisinsightv2/windows/index.html b/explore/redisinsightv2/windows/index.html index 8a547fbf3a..87ad88e819 100644 --- a/explore/redisinsightv2/windows/index.html +++ b/explore/redisinsightv2/windows/index.html @@ -4,7 +4,7 @@ How to run RedisInsight on Windows | The Home of Redis Developers - + @@ -14,7 +14,7 @@

    How to run RedisInsight on Windows


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    RedisInsight is a visual tool that provides capabilities to design, develop and optimize your Redis application. It is a 100% free Redis GUI that allows developers like you to interact with your databases and manage your data.

    RedisInsight v2.0 incorporates a completely new tech stack based on the popular Electron and Elastic UI frameworks. You can run the application locally along with your favorite IDE, and it remains cross-platform, supported on Linux, Windows, and MacOS. RedisInsight Browser lets you explore keys in your Redis server. You can add, edit and delete a key. You can even update the key expiry and copy the key name to be used in different parts of the application.

    RedisInsight Windows Installer

    The RedisInsight desktop client installer for Windows is just 70 MB in size. It lets you install and use the RedisInsight GUI locally. The desktop client is supported on Windows operating systems and works with all variants of Redis. RedisInsight should install and run on a fresh Windows system.

    info

    There is no need to install the .NET framework in order to install RedisInsight on Windows.

    Getting Started

    • Step 1. Create a free Cloud account
    • Step 2. Create a database
    • Step 3. Verify the database details
    • Step 4. Install RedisInsight
    • Step 5. Connect to the Redis database
    • Step 6. Use Browser Tool
    • Step 7. Clone the repository
    • Step 8. Import user database keys
    • Step 9. Modify a Redis key
    • Step 10. Cleaning up

    Step 1. Create a free Cloud account

    Create your free Redis Enterprise Cloud account. Once you click on “Get Started”, you will receive an email with a link to activate your account and complete your signup process.

    tip

    For a limited time, use TIGER200 to get $200 credits on Redis Enterprise Cloud and try all the advanced capabilities!

    🎉 Click here to sign up

    Step 2. Create a database

    Choose your preferred cloud vendor. Select the region and then click "Let's start free" to create your free database automatically.

    tip

    If you want to create a custom database with your preferred name and type of Redis, click the "Create a custom database" option shown in the image.

    create database

    Step 3. Verify the database details

    You will be provided with the public endpoint URL and "Redis Stack" as the type of database, along with the list of modules that come included by default.

    verify database

    Step 4. Install RedisInsight

    Click on the RedisInsight executable (.exe file) and install it in your system.

    setup redisinsight

    Once the RedisInsight software is installed, click on its icon to open the RedisInsight application. It will display the End-User License Agreement and Privacy Settings. Enable Analytics and Encrypt sensitive information as per your preference.

    accept redisinsight licence

    Step 5. Connect to the Redis Database

    Enter the requested details, including Host (endpoint), Port, and Alias in the form, as shown below. You can use "default" as the username for now. Then click “ADD REDIS DATABASE”.

    adding redis database

    Once added, you will see the database name listed as shown below:

    listing the redis database

    Step 6. Use "Browser Tool"

    Click on the "Key" icon on the left sidebar to open up the browser tool.

    redis database with no keys

    Overview of the user database keys

    Let us import a user database (6k keys). This dataset contains users stored as Redis Hashes.

    Users

    The user hashes contain the following fields:

    • user:id : The key of the hash.
    • first_name : First Name.
    • last_name : Last name.
    • email : email address.
    • gender : Gender (male/female).
    • ip_address : IP address.
    • country : Country Name.
    • country_code : Country Code.
    • city : City of the user.
    • longitude : Longitude of the user.
    • latitude : Latitude of the user.
    • last_login : Epoch time of the last login.

    Step 7. Clone the repository

    Open up the CLI terminal and run the following commands:

     git clone https://github.com/redis-developer/redis-datasets
    cd redis-datasets/user-database

    Step 8. Import the user database keys

    Open up the CLI terminal and run the following command.

    note

    You will need a hostname, port and password to run this for a cloud database.

     redis-cli -h redis-18386.c110-qa.us-east-1-1.ec2.cloud.redislabs.com -p 18386 -a <enter your password> < ./import_users.redis

    Refresh the keys view by clicking as shown below:

    listing the keys

    You can get a real-time view of the data in your Redis database as shown below:

    Select any key in the keys view and the key's value gets displayed on the right-hand side, including fields and values.

    hash keys listed

    Step 9. Modify a key

    The RedisInsight browser tool allows you to modify the data instantly. Select any key and change the values as shown in the following screenshot.

    modify the redis keys

    Step 10. Cleaning up

    Run the following command to clean up all the Redis keys:

    flushing the database
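
    The command itself is only visible in the screenshot above; a common way to remove every key in the current database (a sketch with placeholder connection details) is FLUSHDB:

     redis-cli -h <hostname> -p <port> -a <password> FLUSHDB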

    Further References

    Redis Launchpad
    - + \ No newline at end of file diff --git a/explore/redismod/index.html b/explore/redismod/index.html index 940e8cba17..1e88bdda2c 100644 --- a/explore/redismod/index.html +++ b/explore/redismod/index.html @@ -4,7 +4,7 @@ Redis Modules in a Docker Container | The Home of Redis Developers - + @@ -15,7 +15,7 @@ This image is based on the official image of Redis from Docker. By default, the container starts with Redis' default configuration and all included modules loaded.

    Features included in the container

    Step 1. Install Docker

    To use RedisMod on a local Mac, the first step is to install Docker for your operating system. Run the docker version command in a terminal window to make sure that Docker is installed correctly.

     docker version

    It should display Docker Engine Server and Client version successfully.

    Step 2. Running Redismod Docker container

     docker run -d -p 6379:6379 redislabs/redismod

    Step 3. Connect to Redis database

    You can use either redis-cli or RedisInsight to connect to the Redis database. Let's try using redis-cli as shown below:

     redis-cli

    Step 4. Verify that all the Redis modules are loaded

     $ redis-cli
    127.0.0.1:6379> info modules
    # Modules
    module:name=rg,ver=10006,api=1,filters=0,usedby=[],using=[ai],options=[]
    module:name=ai,ver=10002,api=1,filters=0,usedby=[rg],using=[],options=[]
    module:name=timeseries,ver=10408,api=1,filters=0,usedby=[],using=[],options=[]
    module:name=bf,ver=20205,api=1,filters=0,usedby=[],using=[],options=[]
    module:name=graph,ver=20402,api=1,filters=0,usedby=[],using=[],options=[]
    module:name=ReJSON,ver=10007,api=1,filters=0,usedby=[],using=[],options=[]
    module:name=search,ver=20006,api=1,filters=0,usedby=[],using=[],options=[]

    Let us test drive Redis Search as discussed below in detail.

    We are now ready to insert some data. This example uses movies data stored as Redis Hashes, so let’s insert a couple of movies:

     HSET movies:11002 title "Star Wars: Episode V - The Empire Strikes Back" plot "Luke Skywalker begins Jedi training with Yoda." release_year 1980 genre "Action" rating 8.7 votes 1127635
     HSET movies:11003 title "The Godfather" plot "The aging patriarch of an organized crime dynasty transfers control of his empire to his son." release_year 1972 genre "Drama" rating 9.2 votes 1563839

    Your Redis database now contains two Hashes. It is simple to retrieve information using the HMGET command, if you know the key of the movie (movies:11002):

     HMGET movies:11002 title rating
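
    Based on the values inserted above, this should return:

     1) "Star Wars: Episode V - The Empire Strikes Back"
     2) "8.7"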

    To be able to query the hashes on a field such as title or genre, you must first create an index. To create an index, you must define a schema listing the fields, and their types, that are indexed and that you can use in your queries.

    Use the FT.CREATE command to create an index, as shown here:

     FT.CREATE idx:movies ON hash PREFIX 1 "movies:" SCHEMA title TEXT SORTABLE release_year NUMERIC SORTABLE rating NUMERIC SORTABLE genre TAG SORTABLE

    Search the movies in the Redis Search index

    You can now use the FT.SEARCH to search your database, for example, to search all movies sorted by release year:

     FT.SEARCH idx:movies * SORTBY release_year ASC RETURN 2 title release_year

    To test drive the rest of the Redis modules, please visit the links mentioned under the "References" section.

    - + \ No newline at end of file diff --git a/explore/riot/index.html b/explore/riot/index.html index b91ec420b9..099bb30044 100644 --- a/explore/riot/index.html +++ b/explore/riot/index.html @@ -4,7 +4,7 @@ RIOT | The Home of Redis Developers - + @@ -14,7 +14,7 @@

    RIOT


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    Redis Input/Output Tools (RIOT) is a set of import/export command line utilities for Redis:

    • RIOT Redis: live replication from any Redis database (including AWS Elasticache) to another Redis database.
    • RIOT DB: migrate from an RDBMS to Redis

    Using RIOT Redis

    Most database migration tools available today are offline in nature. Migrating data from AWS ElastiCache to Redis Enterprise Cloud, for example, means backing up your ElastiCache data to an AWS S3 bucket and importing it into Redis Enterprise Cloud using its UI. This implies some downtime and might result in data loss. Other available techniques include creating point-in-time snapshots of the source Redis server and applying the changes to the destination servers to keep both servers in sync. It might sound like a good approach, but it can be challenging when you have to maintain dozens of scripts to implement the migration strategy.

    RIOT Redis is a migration tool that allows for seamless live replication between two Redis databases.

    Step 1. Getting Started

    Download the latest release and unzip the archive.

    Launch the bin/riot-redis script and follow the usage information provided.

    Step 2. Build and Run

    git clone https://github.com/redis-developer/riot.git
    cd riot/riot-redis
    ./riot-redis

    Step 3. Install via Homebrew (macOS)

    brew install jruaux/tap/riot-redis

    Usage

    ❯ riot-redis
    Usage: riot-redis [OPTIONS] [COMMAND]
    --help Show this help message and exit.
    -V, --version Print version information and exit.
    -q, --quiet Log errors only
    -d, --debug Log in debug mode (includes normal stacktrace)
    -i, --info Set log level to info

    You can use --help on any subcommand:

    ❯ riot-redis --help

    ❯ riot-redis import --help

    ❯ riot-redis import .. hset --help

    Redis connection options are the same as redis-cli:

      -h, --hostname=<host>     Server hostname (default: 127.0.0.1)
    -p, --port=<port> Server port (default: 6379)
    -s, --socket=<socket> Server socket (overrides hostname and port)
    --user=<username> Used to send ACL style 'AUTH username pass'. Needs password.
    -a, --pass[=<password>] Password to use when connecting to the server
    -u, --uri=<uri> Server URI
    -o, --timeout=<sec> Redis command timeout (default: 60)
    -n, --db=<int> Database number (default: 0)
    -c, --cluster Enable cluster mode
    -t, --tls Establish a secure TLS connection
    -l, --latency Show latency metrics
    -m, --pool=<int> Max pool connections (default: 8)

    Redis URI syntax is described here.

    Step 4. Example

    Here is an example of live replication from a source Redis running at source:6379 to a target Redis running at target:6380:

    ❯ riot-redis -h source -p 6379 replicate --idle-timeout 500 -h target -p 6380 --live

    Step 5. Verification

    Once replication is complete, RIOT Redis performs a verification step to compare values and TTLs between the source and target databases. The output looks like this:

    OK:1000 V:0 >:0 <:0 T:0

    • OK: # identical values
    • V: # mismatched values
    • >: # keys only present in source database
    • <: # keys only present in target database
    • T: # keys with TTL difference greater than tolerance

    Step 6. Architecture

    RIOT Redis implements client-side replication using a producer/consumer approach:

    • the producer is connected to the source Redis (e.g. ElastiCache) and iterates over keys to read their corresponding values

    • the consumer is connected to the target Redis (e.g. Redis Enterprise Cloud) and writes the key/value tuples previously created

    1. Key reader: initiates a SCAN and optionally calls SUBSCRIBE to listen for keyspace notifications (live replication).
    2. Value reader: takes the keys and calls DUMP and TTL.
    3. Key/Value writer: takes key/value/ttl tuples and calls RESTORE and EXPIRE.
    note

    Live replication makes use of keyspace notifications. Make sure the source Redis database has keyspace notifications enabled using notify-keyspace-events = KA in redis.conf or via CONFIG SET.
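
    For example, to enable these notifications at runtime from redis-cli:

     CONFIG SET notify-keyspace-events KA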

    note

    The live replication mechanism does not guarantee data consistency. Redis sends keyspace notifications over pub/sub which does not provide guaranteed delivery. It is possible that RIOT Redis can miss some notifications in case of network failures for example.

    Using RIOT DB

    RIOT DB lets you import data from, and export data to, relational databases.

    Step 1. Getting Started

    Download the latest release and unzip the archive.

    Launch the bin/riot-db script and follow the usage information provided.

    Step 2. Build and Run

    ❯ git clone https://github.com/redis-developer/riot.git
    ❯ cd riot/riot-db
    ❯ ./riot-db

    Step 3. Install via Homebrew (macOS)

    brew install jruaux/tap/riot-db

    Step 4. Usage

    ❯ riot-db
    Usage: riot-db [OPTIONS] [COMMAND]
    --help Show this help message and exit.
    -V, --version Print version information and exit.
    -q, --quiet Log errors only
    -d, --debug Log in debug mode (includes normal stacktrace)
    -i, --info Set log level to info

    You can use --help on any subcommand:

    ❯ riot-db --help
    ❯ riot-db import --help
    ❯ riot-db import … hset --help

    Redis connection options are the same as redis-cli:

      -h, --hostname=<host>     Server hostname (default: 127.0.0.1)
    -p, --port=<port> Server port (default: 6379)
    -s, --socket=<socket> Server socket (overrides hostname and port)
    --user=<username> Used to send ACL style 'AUTH username pass'. Needs password.
    -a, --pass[=<password>] Password to use when connecting to the server
    -u, --uri=<uri> Server URI
    -o, --timeout=<sec> Redis command timeout (default: 60)
    -n, --db=<int> Database number (default: 0)
    -c, --cluster Enable cluster mode
    -t, --tls Establish a secure TLS connection
    -l, --latency Show latency metrics
    -m, --pool=<int> Max pool connections (default: 8)

    Step 5. Drivers

    RIOT DB includes drivers for the most common RDBMSs:

• Oracle: jdbc:oracle:thin:@myhost:1521:orcl
• IBM Db2: jdbc:db2://host:port/database
• MS SQL Server: jdbc:sqlserver://[serverName[\instanceName][:portNumber]][;property=value[;property=value]]
• MySQL: jdbc:mysql://[host]:[port][/database][?properties]
• PostgreSQL: jdbc:postgresql://host:port/database
• SQLite: jdbc:sqlite:sqlite_database_file_path

    For non-included databases you must install the corresponding JDBC driver under the lib directory and modify the RIOT DB CLASSPATH:

*nix: bin/riot-db → CLASSPATH=$APP_HOME/lib/myjdbc.jar:$APP_HOME/lib/…
Windows: bin\riot-db.bat → set CLASSPATH=%APP_HOME%\lib\myjdbc.jar;%APP_HOME%\lib\…

    Step 6. Import

    Use the import command to import the result set of a SQL statement.

    Import from PostgreSQL

    ❯ riot-db -h localhost -p 6379 import "SELECT * FROM orders" --url jdbc:postgresql://host:port/database --username appuser --password passwd hset --keyspace order --keys order_id

    You can specify one or many Redis commands as targets of the import:

    Import into hashes

❯ riot-db import .. hset --keyspace blah --keys id

    Import into hashes and set TTL on the key

    ❯ riot-db import .. hset --keyspace blah --keys id expire --keyspace blah --keys id

    Import into hashes and set TTL and add to a set named myset

    ❯ riot-db import .. hset --keyspace blah --keys id expire --keyspace blah --keys id sadd --keyspace myset --members id

    Step 7. Export

    Export to PostgreSQL

    ❯ riot-db export "INSERT INTO mytable (id, field1, field2) VALUES (CAST(:id AS SMALLINT), :field1, :field2)" --url jdbc:postgresql://host:port/database --username appuser --password passwd --scan-match "hash:*" --key-regex "hash:(?<id>.*)"

    Import from PostgreSQL to JSON strings

    ❯ riot-db -h localhost -p 6379 import "SELECT * FROM orders" --url jdbc:postgresql://host:port/database --username appuser --password passwd set --keyspace order --keys order_id

    This will produce Redis strings that look like this:

{
  "order_id": 10248,
  "customer_id": "VINET",
  "employee_id": 5,
  "order_date": "1996-07-04",
  "required_date": "1996-08-01",
  "shipped_date": "1996-07-16",
  "ship_via": 3,
  "freight": 32.38,
  "ship_name": "Vins et alcools Chevalier",
  "ship_address": "59 rue de l'Abbaye",
  "ship_city": "Reims",
  "ship_postal_code": "51100",
  "ship_country": "France"
}


    What Is Redis?


    Profile picture for Will Johnston
    Author:
    Will Johnston, Developer Growth Manager at Redis

    Redis is an open source, in-memory, key-value data store most commonly used as a primary database, cache, message broker, and queue. Redis delivers sub-millisecond response times, enabling fast and powerful real-time applications in industries such as gaming, fintech, ad-tech, social media, healthcare, and IoT.

    Redis is the most-loved database by developers for five years running. Developers love Redis because of its ease of use, performance, and scalability. There is a Redis client available for use in every popular modern programming language. This, coupled with the performance benefits, makes Redis the most popular choice for caching, session management, gaming, fraud detection, leaderboards, real-time analytics, geospatial indexing, ride-sharing, social media, and streaming applications.

    Redis Enterprise is the only true datastore built for hybrid and multicloud applications. Get started with Redis Enterprise Cloud.

    Redis on AWS

    You might be familiar with Amazon ElastiCache for Redis. It is a Redis-compatible cache service that is available on AWS. Redis Enterprise Cloud on AWS is a fully-managed Redis Enterprise as a service and supports Redis as a cache and a database. Learn more about Redis on AWS.

    Redis on other cloud providers

    Benefits of Redis

    Performance

    The primary benefit of Redis is its sub-millisecond queries. Redis runs in-memory, which enables low-latency and high throughput. Running in-memory means requests for data do not require a trip to disk. This leads to an order of magnitude more operations and faster response times. Redis is one of the only databases that supports millions of operations per second.

    Flexible data structures

    Redis is a multi-model database, and provides several built-in data structures such as:

    • Strings - any text or binary data (512MB max.)
    • Hashes - field-value pairs that most commonly represent objects
    • Lists - a collection of Strings ordered by when they were added as a linked list. Useful for queues and "latest updates" for social media posts
    • Sets - an unordered collection of Strings with the ability to intersect, union, and diff against other Sets
    • Sorted Sets - similar to a Redis Set, the Sorted Set is a collection of unique String members. In a Sorted Set, each member is associated with a score that can be used to sort the collection.
    • Bitmaps - not necessarily a data type, but a set of bit-oriented operations on the String type
    • HyperLogLogs - a probabilistic data structure used in order to count unique things (cardinality of a set)
    • Geospatial - a Sorted Set of longitude/latitude/name key-value pairs useful for maps, geosearching, and "nearby" features
    • Streams - a data type that models an append only log and which can be used as a durable message queue
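
A quick redis-cli tour of a few of these structures (the keys and values are illustrative):

SET greeting "hello"                     # String
HSET user:1 name "Ada" city "Reims"      # Hash: field-value pairs
LPUSH timeline "post:42"                 # List: latest updates first
SADD skills "redis" "python"             # Set: unordered, unique members
ZADD leaderboard 100 "ada" 80 "bob"      # Sorted Set: members ordered by score
PFADD visitors "10.0.0.1"                # HyperLogLog: approximate unique count
XADD events '*' action "login"           # Stream: append-only log entry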

    Redis Stack

    Redis Stack extends Redis with modules that provide additional data structures and capabilities. Redis Stack is available in Redis Enterprise Cloud and Redis Enterprise Software. Redis Stack includes:

    • Search - a real-time search and secondary indexing engine that runs on your Redis dataset and allows you to query data that has just been indexed
    • JSON - a native JSON data type tailored for fast, efficient, in-memory storage and retrieval of JSON documents at high speed and volume
    • Time Series - a time series data type with capabilities like automatic downsampling, aggregations, labeling and search, compression, and enhanced multi-range queries as well as built-in connectors to popular monitoring tools like Prometheus and Grafana to enable the extraction of data into useful formats for visualization and monitoring
    • Probabilistic Data - provides Redis with support for additional probabilistic data structures and allows for constant memory space and extremely fast processing while still maintaining a low error rate. Supports Bloom and Cuckoo filters to determine whether an item is present or absent from a collection with a given degree of certainty, Count-min sketch to count the frequency of the different items in sub-linear space, and Top-K to count top k events in a near deterministic manner
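
As a small taste of the JSON and Search capabilities (the key and index names below are invented for the example):

JSON.SET product:1 $ '{"name":"Wireless Mouse","price":29.99}'
FT.CREATE idx:products ON JSON PREFIX 1 product: SCHEMA $.name AS name TEXT $.price AS price NUMERIC
FT.SEARCH idx:products "@name:mouse"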

    Simplicity and ease-of-use

Redis makes complex applications easier to write and maintain. Redis presents a simple command and query structure for working with data, compared with the query languages of traditional databases. When building applications, you typically use languages such as Java, Python, PHP, C, C++, C#, JavaScript, TypeScript, Node.js, Ruby, Go, and many others. The built-in data structures of Redis present a natural way of storing data exactly as you use it in your code, minimizing impedance mismatch. Redis also provides clients for almost every popular language, making it easy to build applications that can run on any platform.

    Replication and persistence

Redis offers asynchronous replication, where data can be replicated to multiple servers. This allows for improved read performance and faster recovery. Redis Enterprise additionally provides Active-Active Geo-Distribution to ensure that data is distributed across multiple servers in a highly available manner for both reads and writes. Redis supports point-in-time backups (known as RDB) that let you copy Redis data to disk or cloud storage.

    While Redis open source was not necessarily developed with an emphasis on durability and consistency as a default, Redis Enterprise provides durability and consistency for Redis and allows you to use Redis as both a cache and a database.

    High availability and scalability

    Redis provides a primary-replica architecture as a single node or cluster. This allows you to build highly available, scalable, and reliable applications on top of Redis. You can easily scale Redis up, down, in, and out to meet application demands.

    Open source

    Redis is open source and available for free, and offers open source clients in many languages. The Redis Stack is also available for download and self-hosting or use in Redis Enterprise Cloud. Read the Redis Stack license for more information.

    Caching

    Redis is the de facto solution for caching in almost every application. Because Redis can handle millions of queries per second and offers high availability and scalability, it is used as a cache to reduce the load on a relational or NoSQL database. This includes database query caching, session caching, page caching, and caching of frequently used objects such as images, files, and application data. Learn more about Redis caching.

    Session storage

    Redis provides sub-millisecond latency at scale, making it a natural choice to store session data. This includes user profile information, OAuth tokens, credentials, session state, and more. Learn more about Redis session storage.

    Fraud detection

    Redis is built to handle real-time AI and machine learning workloads because of its scalability and high write throughput at low latency. Redis is often used as a primary database, enabling deep learning models directly where the data lives. Bloom filters, time series, and other data structures that work natively with Redis enable cost reduction with high-speed statistical analysis. Learn more about Redis fraud detection.

    Real-time inventory

    Retailers need to ensure that their real-time inventory systems can survive seasonal peaks, maintain data consistency, and deliver instant results. Redis is a great choice for this use case. It is a highly available and highly scalable database that can handle millions of queries per second. Redis clusters can be configured to replicate across multiple servers in a highly available manner, enabling data consistency between stores. Learn more about Redis for real-time inventory management.

    Claims processing

    Insurance companies need to process claims in real time, and they receive millions of claims daily. Redis provides sub-millisecond latency and can process millions of requests per second. Redis has built-in data types for building scalable, event-driven architectures. Redis Streams can enable ingesting and analyzing large amounts of data in real time. Learn more about Redis claims processing.

    Gaming leaderboards

    Leaderboards require constant updates and scalability across millions of users. They also require complex mathematical computation, and must be distributed globally. Redis has built-in data types, such as sorted sets, that are useful for manipulating leaderboards. Redis also supports clustering and can be distributed globally. Learn more about Redis gaming leaderboards.

    Messaging

    Microservices and distributed systems need to be able to communicate with each other. Redis provides a simple, fast, and reliable messaging system that can be used for real-time communication between microservices. Redis Streams can be used to enable real-time analytics and data ingestion. Redis Pub/Sub is a lightweight messaging protocol designed for broadcasting and receiving notifications. Redis Lists and Redis Sorted Sets are two native data structures that are great for implementing message queues. Redis also has client libraries in most programming languages that enable you to use your programming language of choice. Learn more about Redis messaging.
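
For instance, with redis-cli (the stream, channel, and queue names are illustrative):

XADD orders '*' id 42 status "created"   # append an event to a stream
XREAD COUNT 10 STREAMS orders 0          # read the stream from the beginning
PUBLISH alerts "deploy finished"         # broadcast a Pub/Sub notification
LPUSH jobs "job:1"                       # enqueue work on a list
BRPOP jobs 5                             # blocking dequeue with a 5-second timeout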

    Fast data ingest

    Redis can handle millions of read/write operations per second at sub-millisecond latencies, and it runs on AWS, GCP, Azure, and other cloud platforms. This makes Redis a great choice for processing large volumes of data that arrive in bursts, data from multiple sources/formats, data that needs to be filtered and analyzed, and data that is distributed geographically. Learn more about Redis data ingestion.

    Redis language support

    Redis supports most high-level, popular programming languages and has SDKs built to make it easy to get started. Redis clients are available for the following languages (and more):

    • Python
    • JavaScript
    • Node.js
    • Java
    • Go
    • C/C++
    • C#
    • PHP
    • Ruby
    • Perl
    • Rust

    Redis vs. Memcached

    Both Redis and Memcached are open source, powerful, in-memory data stores. The main difference between the two is that Redis is a more full-featured database that is built to fit a number of different use cases. Memcached is primarily used for key/value caching. Redis is used for both caching and as a database.

    How to host Redis

    You can sign up for Redis Enterprise Cloud for free, and when you create your subscription you can specify that you want to use AWS, GCP, or Azure. You can also configure the geographic region where you want to host Redis. Redis Enterprise Cloud is a great option when choosing Redis because:

    1. It is a fully managed service that provides a single point of contact for all your Redis clusters.
2. It is the only managed service that provides Redis Stack and turns Redis into a multi-model database.
    3. It is built to scale with enterprise clustering, Redis-on-Flash, and Active-Active geo-distribution using CRDTs.

    Getting started with Redis

    If you are ready to start building applications using Redis, check out our tutorials for Redis that let you use your programming language of choice!


    How to Use SSL/TLS With Redis Enterprise


    Profile picture for Tugdual Grall
    Author:
    Tugdual Grall, Former Technical Marketing Manager at Redis


In this article, I will explain how to secure your Redis databases using SSL/TLS. In production, it is good practice to use TLS to protect the data moving between client applications and Redis servers. Transport Layer Security (TLS) guarantees that only allowed applications/computers are connected to the database, and that data in transit is not viewed or altered by a man-in-the-middle.

    You can secure the connections between your client applications and Redis cluster using:

• One-Way SSL: the client (your application) gets the certificate from the server (Redis cluster), validates it, and then all communications are encrypted.
• Two-Way SSL (aka mutual SSL): both the client and the server authenticate each other and validate that both ends are trusted.

In this article, I will focus on Two-Way SSL, using Redis Enterprise.

    Prerequisites:

• A Redis Enterprise 6.0.x database (my database is protected by the password secretdb01 and listens on port 12000)
    • redis-cli to run basic commands
    • Python, Node, and Java installed if you want to test various languages.

    Simple Test

    Let's make sure that the database is available:

    redis-cli -p 12000 -a secretdb01 INFO SERVER

    This should print the Server information.

    1- Get the Certificate from Redis Cluster

Assuming you have access to the Redis Enterprise cluster, go to one of the nodes to retrieve the certificate (which is self-generated by default).

    The cluster certificate is located at: /etc/opt/redislabs/proxy_cert.pem.

You have to copy it to each client machine; note that once this is done, you could use this certificate to connect using "One-Way SSL", but that is not the purpose of this article.

    In my demonstration I am using Docker and copy the certificate using this command from my host:

    docker cp redis-node1:/etc/opt/redislabs/proxy_cert.pem ./certificates

    2- Generate a New Client Certificate

With Two-Way SSL, you need a certificate for the client that the Redis database proxy will use to trust the client.

In this article I will use a self-signed certificate generated with OpenSSL. In this example, we are creating a certificate for an application named app_001.

    You can create as many certificates as you want, or reuse this one for all servers/applications.

    Open a terminal and run the following commands:


    openssl req \
    -nodes \
    -newkey rsa:2048 \
    -keyout client_key_app_001.pem \
    -x509 \
    -days 36500 \
    -out client_cert_app_001.pem

This command generates a new client key (client_key_app_001.pem) and certificate (client_cert_app_001.pem) with no passphrase.

    3- Configure the Redis Database

    The next step is to take the certificate and add it to the database you want to protect.

    Let's copy the certificate and paste it into the Redis Enterprise Web Console.

    Copy the certificate in your clipboard:

    Mac:

    pbcopy < client_cert_app_001.pem

    Linux:

     xclip -sel clip < client_cert_app_001.pem

    Windows:

    clip < client_cert_app_001.pem

    Go to the Redis Enterprise Admin Web Console and enable TLS on your database:

    1. Edit the database configuration
    2. Check TLS
    3. Select "Require TLS for All communications"
    4. Check "Enforce client authentication"
    5. Paste the certificate in the text area
    6. Click the Save button to save the certificate
    7. Click the Update button to save the configuration.

    Security Configuration

    The database is now protected, and it is mandatory to use the SSL certificate to connect to it.

    redis-cli -p 12000 -a secretdb01 INFO SERVER
    (error) ERR unencrypted connection is prohibited

    4- Connect to the Database using the Certificate

    In all following examples, I am using a "self-signed" certificate, so I do not check the validity of the hostname. You should adapt the connections/TLS information based on your certificate configuration.

    4.1 Using Redis-CLI

    To connect to a SSL protected database using redis-cli you have to use stunnel.

    Create a stunnel.conf file with the following content:

    cert = /path_to/certificates/client_cert_app_001.pem
    key = /path_to/certificates/client_key_app_001.pem
    cafile = /path_to/certificates/proxy_cert.pem
    client = yes

    [redislabs]
    accept = 127.0.0.1:6380
    connect = 127.0.0.1:12000

    Start stunnel using the command

    stunnel ./stunnel.conf

This starts a process that listens on port 6380 and acts as a proxy to the Redis Enterprise database on port 12000.

    redis-cli -p 6380 -a secretdb01 INFO SERVER

    4.2 Using Python

    Using Python, you have to set the SSL connection parameters:

#!/usr/local/bin/python3

import redis
import pprint

try:
    r = redis.StrictRedis(
        password='secretdb01',
        decode_responses=True,
        host='localhost',
        port=12000,
        ssl=True,
        ssl_keyfile='./client_key_app_001.pem',
        ssl_certfile='./client_cert_app_001.pem',
        ssl_cert_reqs='required',
        ssl_ca_certs='./proxy_cert.pem',
    )

    info = r.info()
    pprint.pprint(info)

except Exception as err:
    print("Error connecting to Redis: {}".format(err))

    More information in the documentation "Using Redis with Python".

    4.3 Using Node.JS

For Node Redis, pass the TLS options when configuring the client connection:

import { createClient } from 'redis';
import fs from 'fs';

const ssl = {
  key: fs.readFileSync('../certificates/client_key_app_001.pem', {
    encoding: 'ascii',
  }),
  cert: fs.readFileSync('../certificates/client_cert_app_001.pem', {
    encoding: 'ascii',
  }),
  ca: [fs.readFileSync('../certificates/proxy_cert.pem', { encoding: 'ascii' })],
  // Self-signed certificate: skip hostname verification
  checkServerIdentity: () => undefined,
};

const client = createClient({
  // replace with your connection string
  url: 'rediss://localhost:12000',
  password: 'secretdb01',
  socket: {
    tls: true,
    key: ssl.key,
    cert: ssl.cert,
    ca: ssl.ca,
    checkServerIdentity: ssl.checkServerIdentity,
  },
});

await client.connect();
console.log(await client.info('SERVER'));

    More information in the documentation "Using Redis with Node.js".

    4.4 Using Java

    In Java, to be able to connect using SSL, you have to install all the certificates in the Java environment using the keytool utility.

    Create a keystore file that stores the key and certificate you have created earlier:

    openssl pkcs12 -export \
    -in ./client_cert_app_001.pem \
    -inkey ./client_key_app_001.pem \
    -out client-keystore.p12 \
    -name "APP_01_P12"

As you can see, the keystore is used to store the credentials associated with your client; it will be used later with the -Djavax.net.ssl.keyStore system property in the Java application.

In addition to the keystore, you also have to create a trust store, which is used to store other credentials, in our case the Redis cluster certificate.

Create a trust store file and add the Redis cluster certificate to it:

keytool -genkey \
  -dname "cn=CLIENT_APP_01" \
  -alias truststorekey \
  -keyalg RSA \
  -keystore ./client-truststore.p12 \
  -keypass secret \
  -storepass secret \
  -storetype pkcs12

keytool -import \
  -keystore ./client-truststore.p12 \
  -file ./proxy_cert.pem \
  -alias redis-cluster-crt

The trust store will be used later with the -Djavax.net.ssl.trustStore system property in the Java application.

    You can now run the Java application with the following environment variables:

    java -Djavax.net.ssl.keyStore=/path_to/certificates/java/client-keystore.p12 \
    -Djavax.net.ssl.keyStorePassword=secret \
    -Djavax.net.ssl.trustStore=/path_to/certificates/java/client-truststore.p12 \
    -Djavax.net.ssl.trustStorePassword=secret \
    -jar MyApp.jar

For this example, and for simplicity, I will hard-code these properties in the Java code itself:


import redis.clients.jedis.Jedis;
import java.net.URI;

public class SSLTest {

    public static void main(String[] args) {

        System.setProperty("javax.net.ssl.keyStore", "/path_to/certificates/client-keystore.p12");
        System.setProperty("javax.net.ssl.keyStorePassword", "secret");

        System.setProperty("javax.net.ssl.trustStore", "/path_to/certificates/client-truststore.p12");
        System.setProperty("javax.net.ssl.trustStorePassword", "secret");

        URI uri = URI.create("rediss://127.0.0.1:12000");

        Jedis jedis = new Jedis(uri);
        jedis.auth("secretdb01");

        System.out.println(jedis.info("SERVER"));
        jedis.close();
    }
}
• lines 8-12: the system properties are set to point to the keystore and trust store (this should be externalized)
• line 14: the Redis URL starts with rediss (with two s's) to indicate that the connection should be encrypted
• line 17: set the database password

    More information in the documentation "Using Redis with Java".

    Conclusion

    In this article, you have learned how to:

    • retrieve the Redis Server certificate
    • generate a client certificate
• protect your database by enforcing transport layer security (TLS) with two-way authentication
    • connect to the database from redis-cli, Python, Node and Java

    Data Modeling for Redis

    Introduction


    Import Data into Redis

    Import data to the Cloud

    Import using RIOT

    $ riot -is -cool


    Online Database Migration from Amazon ElastiCache to Redis Enterprise Cloud using RIOT

    Authors: Ajeet Singh Raina, Julien Ruaux

    Most of the database migration tools available today are offline in nature. They are complex and require manual intervention.

    If you want to migrate your data from Amazon ElastiCache to Redis Enterprise Cloud, for example, the usual process is to back up your ElastiCache data to an Amazon S3 bucket and then import your data using the Redis Enterprise Cloud UI. This process can require painful downtime and could result in data loss. Other available techniques include creating point-in-time snapshots of the source Redis server and applying the changes to the destination servers to keep both the servers in sync. That might sound like a good approach, but it can be challenging when you have to maintain dozens of scripts to implement the migration strategy.

    So we’ve come up with a different approach:

    Introducing RIOT


RIOT is an open source online migration tool built by Julien Ruaux, a Solution Architect at Redis. RIOT implements client-side replication using a producer/consumer approach. The producer is the combination of the key and value readers that have a connection to ElastiCache. The key reader component identifies keys to be replicated using scan and keyspace notifications. For each key, the value reader component performs a DUMP and hands the resulting key+bytes to the consumer (writer), which performs a RESTORE on the Redis Enterprise connection.

    This blog post will show how to perform a seamless online migration of databases from ElastiCache to Redis Enterprise Cloud.

    Prerequisites:

    You will require a few resources to use the migration tool:

    • A Redis Enterprise Cloud subscription
    • Amazon ElastiCache (a primary endpoint in case of a single-master EC and a configuration endpoint in case of a clustered EC: Refer to Finding Connection Endpoints on the ElastiCache documentation to learn more)
    • An Amazon EC2 instance based on Linux

    Step 1 - Setting up an Amazon EC2 instance

    You can either create a new EC2 instance or leverage an existing one. In our example, we will first create an instance on Amazon Web Services (AWS). The most common scenario is to access an ElastiCache cluster from an Amazon EC2 instance in the same Amazon Virtual Private Cloud (Amazon VPC). We have used Ubuntu 16.04 LTS for this setup, but you can choose the Ubuntu or Debian distribution of your choice.

    Use SSH to connect to this new EC2 instance from your computer as shown here:

ssh -i "public key" <AWS EC2 Instance>

    Step 2 - Install the redis-cli tool

$ sudo apt update
$ sudo apt install -y redis-tools

    Verify the connectivity with the ElastiCache database

    Syntax:

$ redis-cli -h <ElastiCache Primary Endpoint> -p 6379

    Command:

    $ sudo redis-cli -h <elasticache primary endpoint> -p 6379

    Ensure that the above command allows you to connect to the remote Redis database successfully.

    Step 3 - Using the RIOT migration tool

    Run the commands below to set up the migration tool.

    Prerequisites:

    Install Java

We recommend using OpenJDK 11 or later:

    sudo add-apt-repository ppa:openjdk-r/ppa && sudo apt-get update -q && sudo apt install -y openjdk-11-jdk

    Installing RIOT

Download the RIOT package, unzip it, and make sure the RIOT binaries are in place, as shown here:

    wget https://github.com/Redislabs-Solution-Architects/riot/releases/download/v2.0.8/riot-redis-2.0.8.zip
    unzip riot-redis-2.0.8.zip
    cd riot-redis-2.0.8/bin/

    You can check the version of RIOT by running the command below:

    ./riot-redis --version
    RIOT version "2.0.8"
    bin/riot-redis --help
    Usage: riot-redis [OPTIONS] [COMMAND]
    -q, --quiet Log errors only
    -d, --debug Log in debug mode (includes normal stacktrace)
    -i, --info Set log level to info
    -h, --help Show this help message and exit.
    -V, --version Print version information and exit.
    Redis connection options
    -r, --redis=<uri> Redis connection string (default: redis://localhost:6379)
    -c, --cluster Connect to a Redis Cluster
    -m, --metrics Show metrics
    -p, --pool=<int> Max pool connections (default: 8)
    Commands:
    replicate, r Replicate a source Redis database in a target Redis database
    info, i Display INFO command output
    latency, l Calculate latency stats
    ping, p Execute PING command

    Once Java and RIOT are installed, we are all set to begin the migration process with the command below, which replicates data directly from the source (ElastiCache) to the target (Redis Enterprise Cloud).

    Step 4 - Migrate the data

    Finally, it’s time to replicate the data from ElastiCache to Redis Enterprise Cloud by running the below command:

    sudo ./riot-redis -r redis://<source Elasticache endpoint>:6379 replicate -r redis://password@<Redis Enterprise Cloud endpoint>:port --live

ElastiCache can be configured in two ways: clustered and non-clustered. The command above covers the non-clustered scenario; for a clustered ElastiCache with a specific database namespace, you need to pass the --cluster option before specifying the source ElastiCache endpoint.
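
Based on the connection options shown in the help output above, the clustered variant would look something like this (a sketch; the endpoints are placeholders):

sudo ./riot-redis --cluster -r redis://<source ElastiCache configuration endpoint>:6379 \
  replicate -r redis://password@<Redis Enterprise Cloud endpoint>:port --live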

    Important notes

    • Perform user acceptance testing of the migration before using it in production.
    • Once the migration is complete, ensure that application traffic gets successfully redirected to the Redis Enterprise endpoint.
    • Perform the migration process during a period of low traffic to minimize the chance of data loss.

    Conclusion

    If you’re looking for a simple and easy-to-use live migration tool that can help you move data from Amazon ElastiCache to Redis Enterprise Cloud with no downtime, RIOT is a promising option.


    Import Data into Redis

Redis offers multiple ways to import data into a database: from a file, from a script, or from an existing Redis database.

Import using a redis-cli script

    1. Create a simple file users.redis with all the commands you want to run

      HSET 'user:001' first_name 'John' last_name 'doe' dob '12-JUN-1970'
      HSET 'user:002' first_name 'David' last_name 'Bloom' dob '03-MAR-1981'
    2. Use the redis-cli tool to execute the script

      redis-cli -h localhost -p 6379 < users.redis

    This approach will only run the commands and will not impact the existing data, except if you modify existing keys in the script.

Sample dataset: You can find sample datasets ready to be imported using this method in the https://github.com/redis-developer/redis-datasets repository.


    Restore an RDB file

If you have an RDB file dump.rdb that contains the data you want, you can use this file to create a new database:

    1. Copy the dump.rdb file into the Redis working directory

If you do not know where this folder is, run the command CONFIG GET dir against your running Redis instance

    2. Start the Redis service with the redis-server

    3. The file dump.rdb is automatically imported.

4. Connect to the database using redis-cli or any other client to check that the data has been imported (for example with SCAN).

    - Warning: Importing data erases all existing content in the database.
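
Putting those steps together on a typical Linux install (a sketch; the data directory and service name depend on your setup):

redis-cli CONFIG GET dir              # find the Redis working directory
sudo systemctl stop redis-server      # stop Redis so it does not overwrite the file on shutdown
sudo cp dump.rdb /var/lib/redis/      # assumed working directory reported by CONFIG GET dir
sudo systemctl start redis-server     # dump.rdb is loaded automatically on startup
redis-cli SCAN 0                      # spot-check that keys were imported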


    Import & Synchronize using RIOT

    Redis Input/Output Tools (RIOT) is a set of import/export command line utilities for Redis:

    • RIOT DB: migrate from an RDBMS to Redis, Search, JSON, ...
    • RIOT File: bulk import/export data from/to files.
    • RIOT Gen: generate sample Redis datasets for new feature development and proof of concept.
    • RIOT Redis: live replication from any Redis database (including AWS Elasticache) to another Redis database.
    • RIOT Stream: import/export messages from/to Kafka topics.

    Import data into Redis Enterprise

You can easily import data into Redis Enterprise and Redis Enterprise Cloud; take a look at the following documentation:


    Indexing and Querying

    Introduction

    Conceptually, Redis is based on the key-value database paradigm. Every piece of data is associated with a key, either directly or indirectly. If you want to retrieve data based on anything besides the key, you’ll need to implement an index that leverages one of the many data types available in Redis.

There are various ways to create indexes using core Redis data structures, for example:

    • Sorted sets to create secondary indexes by ID or other numerical fields.
    • Sorted sets with lexicographical ranges for creating more advanced secondary indexes, composite indexes and graph traversal indexes.
    • Sets for creating random indexes.
    • Lists for creating simple iterable indexes and last N items indexes.
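
For example, a minimal secondary index on a numeric field using a Sorted Set (the key names are illustrative):

HSET user:1 name "Ada" age 36        # the primary record
ZADD idx:user:age 36 user:1          # index entry: score = age, member = record key
ZRANGEBYSCORE idx:user:age 30 40     # find user keys with age between 30 and 40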

When using these data structures, you must build your own logic to keep the index up to date. To simplify and automate this task, Redis offers Search, which provides indexing and querying.

    The easiest way to index and query data in Redis is to use the Redis Search module.

    You can follow the Redis Search Tutorial to learn more about it and look at the following video from Redis University:

    Querying, Indexing, and Full-text Search in Redis

If you have questions about Redis Search and other modules, ask them in the Redis Community Forum.


    Hacktoberfest 2021 at Redis


    Profile picture for Suze Shardlow
    Author:
    Suze Shardlow, Developer Community Manager at Redis
    Profile picture for Simon Prickett
    Author:
    Simon Prickett, Principal Developer Advocate at Redis

    (Looking for Hacktoberfest 2022? Find us over at redis.io!)

Hacktoberfest is a month-long online festival which takes place every year in October. It is sponsored by DigitalOcean and aims to encourage people to get involved in open source projects. Hacktoberfest 2021 has now finished! We've left the information below here so you can see how it worked and check out the recordings of our live streams.

    How to get involved

    We've created a number of GitHub issues for folks who want to contribute to our documentation and demo apps. View our list of open issues.

    Get a GitHub account and Hacktoberfest account

    You'll need a GitHub account to contribute to our repos. Sign up for free at GitHub.com.

    You'll also need to register with Hacktoberfest using your GitHub account if you want to be in with a chance of earning swag from DigitalOcean. Please note that Redis is not involved in allocating or sending swag.

    Finding and working on an issue

    1. Look for a suitable issue on GitHub. Where possible, we have tagged them according to the skillset and level of experience required.

    2. Read the guidance notes on each issue carefully so you know what's expected of you.

    3. Add a comment in the issue stating that you're working on it. To be fair to other contributors, only claim one issue at a time.

      Example Issue

    4. Open a pull request within two calendar days:

      • This is to give more people a fair chance at finding an unclaimed issue.
      • Make sure you reference the issue number in your pull request so that it shows on the issue's page.
      • If you include your Twitter handle, we will give you a shout out.
      • If you're a member of our Discord server, include your Discord handle and we will bestow the Hacktoberfest 2021 role upon you.

      Example Pull Request

      When you do this, your pull request will then be automatically referenced in the issue:

      Example Issue with Pull Request

      • If you don't submit a pull request within two calendar days, we will make the issue available to other contributors.
    5. We will review your pull request. If it's suitable, we'll merge it and add the hacktoberfest-accepted label. If we feel that further work is required, we'll comment as part of our review.

    Read DigitalOcean's complete Hacktoberfest rules here.

    Join the conversation

    Need help with one of our issues, or just want to chat with other contributors? Join us on Discord!

    Looking for more repos?

    If you're looking for more repos to contribute to during Hacktoberfest, check out the Hacktoberfest topic on GitHub. Redis is not responsible for the content of third party repositories.

    Learn more

    Documentation is often cited as a great way to get your feet wet with open source. So to demystify the world of technical writing, we have hosted four live events with our documentation team. Suze Shardlow, Developer Community Manager, sat down with Technical Writers Kaitlyn Michael, Rachel Elledge and Lance Leonard for a series of fireside chats.

    Fireside panel - Suze Shardlow with the documentation team: Technical Writing Explained

    Fireside 1:1 - Suze Shardlow with Kaitlyn Michael: Technical Writing Explained

    Fireside 1:1 - Suze Shardlow with Rachel Elledge: Technical Writing Explained

    Fireside 1:1 - Suze Shardlow with Lance Leonard: Technical Writing Explained

    Contact us

    Hacktoberfest at Redis is brought to you by Suze Shardlow and Simon Prickett of the Redis Developer Relations team. Contact us if you have any questions that aren't addressed here. Please note that we are available during UK daytime.

    We can't debug or refactor your code for you, but if you need help understanding how the project works, write a post in the Hacktoberfest channel on our Discord server.


    Hacktoberfest Stories: Opening the source of open source

    Preface by Suze Shardlow, Developer Community Manager at Redis:

    Lara Aasem is a backend software engineer based in Cairo, Egypt. For Hacktoberfest, she contributed an enhancement to Kaboom, which is one of our demo apps. This is her story.

    Open source has always simultaneously fascinated and daunted me. As a backend engineer with a background in sociology, the fact that our world is powered by software built out of empathy, a desire to improve our collective experience, and genuine love of craft—rather than profit—seemed upliftingly radical to me. The open source community's codes of conduct, care for all involved, and emphasis on learning drew me in, but I was still intimidated by it. How do I even start contributing significantly to projects I know nothing about?

    It was there that Hacktoberfest found me. It was 2020 and I was frustrated after a drawn-out attempt to make a sizable contribution to a project went awry because I could never find the time to properly address the review comments for a change of that size. After hearing about the event from coworkers, I realized there was a space in which I could make small, beginner-friendly yet meaningful contributions. While exploring unfamiliar codebases and using languages I may not be super comfortable with was challenging, it was also rewarding, especially with the support of maintainers and the knowledge that I was contributing to building the kind of world I dream about.

    Finding...

    My first experience with Hacktoberfest was so fulfilling, I spent all of 2021 excited for October. When the time came, I once again used www.goodfirstissues.com, an aggregator that lists GitHub issues labeled as goodfirstissue, with the ability to filter by other labels as well as the programming language and name of the repository housing the issue. My criteria when searching for issues were:

    • small and well-documented enough to be worked on in a few hours,
    • in a language I know but am not necessarily proficient in, and
    • exciting because of the nature of the project, the learning opportunities it offers, or (ideally) both.

    This is how I came across an issue by Redis to implement an API validation for a Redis RPG game example built with Kaboom.JS.

    Screen grab of the issue

    It fit all my criteria:

    • As a straightforward change, it would only take a few hours to become familiar with the project, run it, implement the validation, and address any review comments, especially since the project was well-documented and the issue description explained exactly what was required and how to seek help from the maintainers.
    • It was in Node.js, a framework I'm very familiar with.
    • I was excited to learn more about Redis and contribute to a repository that helped other developers learn more about it.

    ... enjoying...

    This last point made the issue all the more enjoyable to work on. I have a high-level understanding of how Redis works and have used it before with Ruby on Rails via redis-rb, a Ruby client library. It was exciting to try a Node.js client instead (ioredis) and to be exposed to Redis JSON, going through its docs to find the most suitable command to use for this particular issue. It was also helpful to see another contributor suggest improvements to my implementation in their own pull request (PR) implementing validation for another API.

    ... and working on the issue

    1. Finding out how to contribute

    Different projects have different guidelines for contributing. These may be outlined in the README.md of the project's GitHub repo, in a separate CONTRIBUTING.md file in the repo's base directory, or in a guide on the project or organization's website, the latter being the case with Redis as explained in the issue description. The Redis Hacktoberfest guide asked contributors to comment on the issue they're working on and to only claim one at a time out of fairness, which seems to be standard procedure across many open source repos.

    Screen grab of Lara asking to be assigned

    2. Running the project

    After quickly combing through the README.md, my next step was to run the project to get a better sense of how it worked. At the time, the project structure was that you could run Redis via Docker but you had to run the API server locally (this has since been addressed via another issue), so this is what I did. I also made sure to load the sample data as instructed in the Setup section of the README.md (and after, if I remember correctly, a few errors).

Screen grab of the Redis Kaboom server listening on port 8080

    3. Trying out the API

    The API in question is a GET request that fetches the data for a room given the game ID.

    Screen grab of Postman

    4. Implementing the validation

    If this API was called with an invalid room number, the server would crash with a 500 HTTP status code. The issue was to explicitly validate the room number, returning a more meaningful 400 HTTP status code and response body to the client.

    After combing through the sample data I had loaded previously via npm run load and finding out that the room data was persisted as an array of JSON objects, I assumed the minimum room number would be 0. To get the maximum, then, I would need to get the last index in the array by getting the array length and subtracting one from it. For this, I used the JSON.ARRLEN Redis JSON command, validating that the room number sent in the request path was within range and returning 400 otherwise.
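
In spirit, the validation looked something like the following sketch (the route, key layout, and names here are hypothetical; the actual Kaboom code differs):

import express from 'express';
import Redis from 'ioredis';

const app = express();
const redis = new Redis(); // assumes Redis on localhost:6379

app.get('/api/room/:gameId/:roomNumber', async (req, res) => {
  const { gameId, roomNumber } = req.params;
  // Length of the rooms array stored in Redis JSON (hypothetical key layout).
  const numRooms = await redis.call('JSON.ARRLEN', `game:${gameId}`, '.rooms');
  const room = parseInt(roomNumber, 10);

  // Valid rooms are 0 .. numRooms - 1; anything else gets a 400.
  if (Number.isNaN(room) || room < 0 || room > numRooms - 1) {
    return res.status(400).json({ error: 'Invalid room number.' });
  }

  // ...fetch and return the requested room data here...
  res.json({ room });
});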

    Screen grab of the file changes in GitHub

    5. Testing

    Always a beautiful moment:

    Screen grab of Postman showing an invalid room number

    6. Opening a PR

    Once I was satisfied with the functionality and quality of the code, I pushed my changes to a fork of the upstream repo and opened a PR. I simply linked the issue number in the PR description as there was no required template to follow and there wasn't much else to note regarding the implementation.

    Screen grab of Lara&#39;s pull request

    Post-merge-um

    On checking my PR to see that it was reviewed, approved, and merged (and to revel in the beauty of all those hacktoberfest-accepted, Merged, and Closed labels), I noticed another contributor had referenced my PR in their own. They had some good comments on a corner case I had missed as well as the format of the response I was sending.

    Screen grab of another contributor&#39;s PR

    A quarter of the way into Hacktoberfest 2021 and I had already learned a lot, engaged with other members of the open source community, and had a good time doing it. While finding and contributing to suitable open source issues could still be challenging at times, it was no longer the seemingly impossible task it used to be.


    Hacktoberfest Stories: A Hacktoberfest Composition: Redis and Docker

    Hello! My name's Vincent Aceto and I am a Software Engineer based in New York City. Throughout the week, you can find me hacking away on mobile and TV applications over at Equinox Media. On the weekends, when I'm not getting lost somewhere on my skateboard, I'll be nose deep in some open source or personal projects.

    October is a special month for those who enjoy working on exciting software projects. In fact, there couldn’t be a more perfect month to get into software exploration and contribution; it's collectively known as Hacktoberfest! Hacktoberfest is a community-led effort to encourage open source contributing and foster learning. I am a huge advocate for open source, so getting involved in Hacktoberfest is such a joy; if I have the chance to learn something new or brush up on some skills, then definitely count me in.

    Now, rewind the clock a bit, and you'd find me perusing Github's Hacktoberfest-tagged issues. I wanted to find the best first contribution for the month's coding festivities. While searching, I had one very important criterion that the introductory issue needed to satisfy: to work with a technology that I do not use on a daily basis. I wanted to make sure that I walked away with a newfound knowledge that would benefit my career. After some time, my eyes landed on a Redis Developer Community issue - I knew it immediately, this was perfect! The checkbox was ticked, for I do not regularly work with Redis. I was now ready to kick off the Hacktoberfest celebration.

The project I worked on is entitled Introducing The Geosearch Command. The goal of the project is to demonstrate the use of the GEOSEARCH command, which was added to Redis in the recent 6.2 release. Working as a software engineer, you are almost always going to be working with some cached data and, more often than not, it's Redis that is sitting nicely somewhere in that cache layer. That said, my first-hand experience (at the time) with the caching technology resonated somewhere between "landing page" and "getting started". The project turned out to be a two-for-one developer sale: I would get to learn more about the Redis technology, how to set up an instance and familiarize myself with the API, and I would get the opportunity to work with Docker, which I'm not regularly hacking with during my day-to-day.

    Now, onto the issue. The issue's aim was to extend an existing Docker Compose integration. The docker-compose.yml file was to include a schema, which was to run the repository's Python Flask application in a Docker container. Additionally, the main application was to connect to the project's existing Redis container - this Redis build step was already included in the Docker Compose file. With the features and constraints clearly defined, the next step was to pull out the documentation. To make sure I was familiar with the tech at hand, and to ensure I got the most out of working on the issue, I started with the Redis installation docs - becoming aware that things like the default Redis port 6379 would come to serve me well when debugging. After installation, I took some time to explore the Redis API and read about Redis' internal hash implementation at a high level. The final reconnaissance was to review Docker. I had briefly used Docker at a previous position, and have worked on some personal projects using the container technology; however, a quick Dockerfile and docker-compose.yml refresher was necessary.

    With the pre-work done, it was time to start the Flask application's Docker Compose implementation. Here is a step-by-step guide, expressed in the present tense, to the process:

    First, let's start with the Docker Compose YAML file:

    Screen grab of the YAML file

    As you can see, we have some Redis provisioning steps. We assign a name to the container, define the version of Redis we wish to spin up, and the port mapping (6379:6379 states we'd like to expose port 6379 from inside the container to a port on your local machine).
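
Based on that description, the Redis portion of the docker-compose.yml would have looked roughly like this (a reconstruction; the image tag is an assumption, and the repo's exact file may differ):

version: '3'
services:
  redis:
    container_name: redis
    image: 'redis:6.2'    # GEOSEARCH requires Redis 6.2 or later
    ports:
      - '6379:6379'       # expose the container port to the host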

    Now, let's start with composing the project's application. Unlike the Redis container, which uses an official Docker image to build from, we don't have a blueprint to scaffold the project's application. This blueprint, or schema, is called a Dockerfile. A Dockerfile lists steps on how to build our image. It's this very image that tells Docker's engine how to build the container. Let's create a Dockerfile, which will assemble the application image for us:

    Screen grab of the Dockerfile

    In short, this file serves as the foundation for the construction of the project's application environment. Additionally, the file tells Docker which files we want to include in our container, how to install the contained app's dependencies and what command should be used to run the app. Most of the file's instructions are better explained in the official documentation, so please take a look there if you're curious as to what the file's instructions have to offer.

    Great, before we move on to the compose file, let's make sure we test that Docker is able to build and run the container from our image.

    Let's build the image:

    Screen grab of the image build

    Get our newly created image's hash identifier by listing our local images:

    Screen grab of the list of local images

    Now let’s run the container using the image id, while making sure we bind a port on our machine to the exposed port defined in the Dockerfile:

    Screen grab of the port binding

    Great! The logs indicate the container is running. Let's ensure our port mapping is working. A quick cURL command verifies that we can talk to the application:

    Screen grab of the cURL command

With the Flask application Docker-fied™, let's compose it with Redis!

    Screen grab of the Redis composition

    Let us quickly dissect what was added to the docker-compose.yml:

    1. Define a service for the application (namespaced under 'app')
    2. Define a name for the container
    3. Set a build context/entry point (this is the relative location for our service's Dockerfile)
    4. Map the service's port to the host machine
    5. Ensure that Redis is initialized before the Flask app starts (since the Flask application requires a Redis connection on init)
    6. Define the necessary environment variables.
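
Taken together, those additions might look roughly like this (a reconstruction; the container name and environment variables are illustrative):

services:
  app:
    container_name: geosearch-app   # 2. name the container
    build: .                        # 3. build context where the app's Dockerfile lives
    ports:
      - '5000:5000'                 # 4. map the Flask port to the host
    depends_on:
      - redis                       # 5. start Redis before the Flask app
    environment:                    # 6. environment variables the app expects
      - REDIS_HOST=redis
      - REDIS_PORT=6379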

    With the scaffolding in order, it's now time to run both the Flask application and Redis with Docker Compose. To do so, we'll run the command docker-compose up:

    Screen grab of the docker-compose-up command

    Finally, let's navigate to localhost:5000 in our browser to see the application in action:

    Screen grab showing localhost:5000

    Excellent, the Flask application is running and is composed with the pre-existing Redis integration!

    Now, before I conclude, I'd be remiss if I said that things worked as smoothly as portrayed; however, we welcome such hiccups and challenges. The main problem I faced was an empty response from the contained application server. What could be the issue? The Dockerfile, for the Flask app, is working. The compose file seemingly provisions our services successfully. What could be the problem here? Welp, turns out I forgot a very important factoid: Docker Compose will set up a single default network, one of which will house the services defined in the yaml file. Containers and their services can communicate within this network, but what about our browser - which is not on that Docker network?

    To resolve this issue, we need to tell our contained application server that it should listen on all networks, not just localhost; which, in the context of our running Docker network, is local only to that micro-network, if you will. To tell the Flask server to listen on all accessible networks, we can define our host in the Dockerfile's CMD command:
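
In Dockerfile terms, that is a one-line change to the CMD instruction (a sketch; the --host flag is the important part):

# Bind Flask to all interfaces, not just the container's localhost,
# so the published port is reachable from the host machine.
CMD ["flask", "run", "--host=0.0.0.0", "--port=5000"]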

    Screen grab showing the CMD command

    All good!

    Working through this issue, I definitely picked up some newfound Redis knowledge! While not 100% necessary for the task at hand, starting with the official documentation and exploring the API provided me with the confidence needed to tackle this issue. Additionally, the project allowed me to solidify some pre-existing Docker knowledge; and, very politely, pointed out which knowledge gaps needed to be filled.

    Working through this Hacktoberfest-inspired issue was very rewarding, and I can say that I have walked away a better developer. Not only was I exposed to more technology, and got to flex some problem-solving muscles, but my passion for open-source software collaboration has grown evermore.

    Thank you for reading! I hope my story inspires you to start with (or continue) working with open source.


You can find Vincent online at his website and on LinkedIn.

Building an Analytics dashboard app using Redis

Example:

     GET rab:count:action:addToCart:timeSpan:2015-12/1
• Shares of products bought ({productPage} is one of product1, product2, product3):

    December: GET rab:count:action:buy:page:{productPage}:timeSpan:2015-12 Example:

     GET rab:count:action:buy:page:product3:timeSpan:2015-12
• Week X of December: GET rab:count:action:buy:page:{productPage}:timeSpan:2015-12/{X} Example:

       GET rab:count:action:buy:page:product1:timeSpan:2015-12/2

    Customer and Cohort Analysis

    • People who registered: BITCOUNT rab:bitmap:action:register:timeSpan:2015-12
• People who registered and then bought (order matters): BITCOUNT rab:bitmap:custom:cohort-buy:timeSpan:2015-12
• Dropoff: (People who registered and then bought / People who registered) * 100 [%]
    • Customers who bought only specified product ({productPage} is one of: product1, product2, product3):
    SMEMBERS rab:set:action:buy:page:{productPage}:timeSpan:2015-12

    Example:

     SMEMBERS rab:set:action:buy:page:product2:timeSpan:2015-12
    • Customers who bought Product1 and Product2:
    SINTER rab:set:action:buy:page:product1:timeSpan:anytime rab:set:action:buy:page:product2:timeSpan:anytime
• Customer retention (customers who bought on different dates): SMEMBERS rab:set:custom:retention-buy:timeSpan:anytime

    References


    Redis Anti-Patterns Every Developer Should Avoid


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    antipattern

Developers don’t just use Redis, they love it. Stack Overflow’s annual Developer Survey 2021 ranked Redis as the Most Loved Database platform for the fifth year running! But it is equally important to understand that Redis defaults are not the best for everyone. Millions of developers use Redis for its speed and performance, but it is important to make sure it is being used properly.

    "Antipatterns" basically refers to those practices and solutions that might seem to be a good fit initially but when it comes to implementation phase, it makes your code much more complex. Let us look at the top Redis anti-patterns to avoid:

    1. Large databases running on a single shard/Redis instance

With large databases running on a single shard/Redis instance, failover, backup, and recovery will all take longer. Hence, it’s always best to keep shards to the recommended sizes. A conservative rule of thumb is 25 GB or 25K ops/second per shard.

Redis Enterprise recommends sharding if you have more than 25 GB of data and a high number of operations. If you are above 25,000 operations per second, sharding can improve performance; with a lower number of operations per second, a single shard can handle up to 50 GB of data.

Example #1 - redis-py

Let us look at redis-py, which uses a connection pool to manage connections to a Redis server. By default, each Redis instance you create will in turn create its own connection pool. You can override this behavior and use an existing connection pool by passing an already created connection pool instance to the connection_pool argument of the Redis class. You may choose to do this in order to implement client-side sharding or to have fine-grained control of how connections are managed.

 >>> import redis
>>> pool = redis.ConnectionPool(host='localhost', port=6379, db=0)
>>> r = redis.Redis(connection_pool=pool)

    Learn more about redis-py

    2. Connecting directly to Redis instances

With a large number of clients, a reconnect flood can simply overwhelm a single-threaded Redis process and force a failover. Hence, it is recommended to use a tool that reduces the number of open connections to your Redis server.

The Redis Enterprise DMC proxy allows you to reduce the number of connections to your cache server by acting as a proxy. There are also third-party tools like Twemproxy, a fast and lightweight proxy server that reduces the number of open connections to your Redis server. It was built primarily to reduce the number of connections to the caching servers on the backend. This, together with protocol pipelining and sharding, enables you to horizontally scale your distributed caching architecture.

    3. More than one secondary shard (Redis OSS)

Redis OSS uses a shard-based quorum. It's advised to use at least 3 copies of the data (2 replica shards per master shard) in order to be protected from split-brain situations. In a nutshell, Redis OSS solves the quorum challenge by having an odd number of shards (primary + 2 replicas).

Redis Enterprise solves the quorum challenge with an odd number of nodes. Redis Enterprise avoids a split-brain situation with only 2 copies of the data, which is more cost-efficient. In addition, the so-called ‘quorum-only node' can be used to bring a cluster up to an odd number of nodes if an additional, otherwise unnecessary, data node would be too expensive.

4. Performing single operations

    Performing several operations serially increases connection overhead. Instead, use Redis Pipelining. Pipelining is the process of sending multiple messages down the pipe without waiting on the reply from each - and (typically) processing the replies later when they come in.

Pipelining is completely a client-side implementation. It is aimed at solving response latency issues in high network latency environments; the less time spent over the network sending commands and reading responses, the better. This is effectively achieved by buffering. The client may (or may not) buffer the commands at the TCP stack before they are sent to the server. Once they are sent, the server executes them and buffers the replies on the server side. The benefit of pipelining is drastically improved protocol performance: the speedup gained by pipelining ranges from a factor of five for connections to localhost up to a factor of at least one hundred over slower internet connections.
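As a quick illustration, here is a minimal redis-py sketch of pipelining; the key names and count are arbitrary:

 import redis

 r = redis.Redis(host='localhost', port=6379)

 # Queue 100 SET commands on the client, then send them in one batch.
 pipe = r.pipeline()
 for i in range(100):
     pipe.set(f'key:{i}', i)

 replies = pipe.execute()  # one round trip returns all 100 replies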

    5. Caching keys without TTL

Redis functions primarily as a key-value store. It is possible to set timeout values on these keys, and when a timeout expires the key is automatically deleted. Additionally, commands that delete or overwrite the contents of a key will clear the timeout. The Redis TTL command returns the remaining time to live, in seconds, of a key that has a timeout. This introspection capability allows a Redis client to check how many seconds a given key will continue to be part of the dataset. Without TTLs, keys accumulate and end up being evicted. Hence, it is recommended to set TTLs on all caching keys.
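For example, a TTL can be set at write time and inspected afterwards (the key name and timeout here are arbitrary):

 SET user:session:42 "cached-value" EX 3600
 TTL user:session:42
 // 3600 (seconds remaining before the key expires)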

    6. Endless Redis Replication Loop

    When attempting to replicate a very large active database over a slow or saturated link, replication never finishes due to the continuous updates. Hence, it is recommended to tune the slave and client buffers to allow for slower replication. Check out this detailed blog.

    7. Hot Keys

Redis can easily become the core of your app’s operational data, holding valuable and frequently accessed information. However, if you centralize access down to a few pieces of data accessed constantly, you create what is known as a hot-key problem. In a Redis cluster, the key is actually what determines where in the cluster that data is stored. The data is stored in one single, primary location based on a hash of that key. So, when you access a single key over and over again, you’re actually accessing a single node/shard over and over again. Put another way: if you have a cluster of 99 nodes and a single key that gets a million requests in a second, all million of those requests go to a single node, not spread across the other 98 nodes.

Redis even provides tools to find where your hot keys are located. Use redis-cli with the --hotkeys argument alongside any other arguments you need to connect:

     $ redis-cli --hotkeys

When possible, the best defence is to avoid the development pattern that creates the situation. Writing the data to multiple keys that reside in different shards lets you spread the load for the same data across the cluster. In a nutshell, avoid having specific keys that are accessed with every client operation; it's recommended to shard out hot keys using hashing algorithms. You can set the eviction policy to LFU and run redis-cli --hotkeys to identify them.
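A minimal redis-py sketch of sharding out a hot key is shown below; the key naming scheme and replica count are assumptions for illustration:

 import random

 import redis

 r = redis.Redis()
 N_REPLICAS = 10  # number of copies to spread across shards (assumed)

 def write_hot(value):
     # Each suffix hashes to a different slot, spreading writes across shards.
     for i in range(N_REPLICAS):
         r.set(f'hotkey:{i}', value)

 def read_hot():
     # Reads are distributed across the N copies.
     return r.get(f'hotkey:{random.randint(0, N_REPLICAS - 1)}')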

    8. Using Keys command

    In Redis, the KEYS command can be used to perform exhaustive pattern matching on all stored keys. This is not advisable, as running this on an instance with a large number of keys could take a long time to complete, and will slow down the Redis instance in the process. In the relational world, this is equivalent to running an unbound query (SELECT...FROM without a WHERE clause). Execute this type of operation with care, and take necessary measures to ensure that your tenants are not performing a KEYS operation from within their application code. Use SCAN, which spreads the iteration over many calls, not tying up your whole server at one time.
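For example, SCAN iterates with a cursor: you call it repeatedly, passing back the cursor it returns, until the cursor comes back as 0. The cursor value and matches below are illustrative:

 SCAN 0 MATCH user:* COUNT 100
 // 1) "1376"   <- cursor to pass to the next call
 // 2) 1) "user:63"
 SCAN 1376 MATCH user:* COUNT 100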

Scanning the keyspace by key name is an extremely slow operation that runs in O(N), with N being the number of keys. It is recommended to use Redis Search to return information based on the contents of the data instead of iterating through the key space.

 FT.SEARCH orders "@make: ford @model: explorer"
SQL: SELECT * FROM orders WHERE make=ford AND model=explorer

    9. Running Ephemeral Redis as a primary database

    Redis is often used as a primary storage engine for applications. Unlike using Redis as a cache, using Redis as a primary database requires two extra features to be effective. Any primary database should really be highly available. If a cache goes down, then generally your application is in a brown-out state. If a primary database goes down, your application also goes down. Similarly, if a cache goes down and you restart it empty, that’s no big deal. For a primary database, though, that’s a huge deal. Redis can handle these situations easily, but they generally require a different configuration than running as a cache. Redis as a primary database is great, but you’ve got to support it by turning on the right features.

    With Redis open source, you need to set up Redis Sentinel for high availability. In Redis Enterprise, it’s a core feature that you just need to turn on when creating the database. As for durability, both Redis Enterprise and open source Redis provide durability through AOF or snapshotting so your instance(s) start back up the way you left them.

    10. Storing JSON blobs in a string

Microservices written in several languages may not marshal/unmarshal JSON in a consistent manner. Application logic will be required to lock/watch a key for atomic updates, and JSON manipulation is often a very compute-costly operation. Hence, it is recommended to use the HASH data structure or Redis JSON.

    11. Translating a table or JSON to a HASH without considering query pattern

The only query mechanism is a SCAN, which requires reading the data structure and limits filtering to the MATCH directive. It is recommended to store the table or JSON as a string. Break out the indexes into reverse indexes using a SET or SORTED SET and point back to the key for the string.

12. Using the SELECT command and multiple databases inside one Redis instance

    The usage of SELECT and multiple databases inside one Redis instance was mentioned as an anti-pattern by Salvatore (the creator of Redis). It is recommended to use a dedicated Redis instance for each database need. This is especially true in microservice architectures where client applications might step on each other's toes (noisy neighbor, database setup/teardown impact, maintenance, upgrade, ...)

The Redis Time Series module is a direct competitor to time series databases. But if your only query is based on ordering, it's unnecessary complexity. Hence, it is recommended to use a SORTED SET with a score of 0 for every value (the values are appended in order), or to use a timestamp as the score for simple time-based queries.
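As a sketch of that sorted-set alternative, with an arbitrary key and payload:

 ZADD sensor:17:readings 1615480369 "21.5"
 ZADD sensor:17:readings 1615480429 "21.9"
 ZRANGEBYSCORE sensor:17:readings 1615480000 1615481000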

    References

    Redis Launchpad

    How to cache REST API responses Using Redis & NodeJS


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    My Image

This app returns the number of repositories a GitHub account has. When you first search for an account, the server calls GitHub's API to return the response, which can take some time. The server then adds the details of this slow response to Redis for future requests. When you search again, the next response comes directly from the Redis cache instead of calling GitHub, so the responses become much faster.

Prerequisites

    1. Install NodeJS

    brew install node

    2. Clone the repository

    git clone https://github.com/redis-developer/basic-caching-demo-nodejs

    3. Copy .env.sample to create .env

    - REDIS_ENDPOINT_URI: Redis server URI
    - REDIS_PASSWORD: Password to the server

    4. Run frontend

    cd client
    yarn
    yarn serve

    5. Run backend

    yarn
    yarn start

Open up http://localhost:8081 and you can see the basic caching demo application up and running.

    My Image

How to build a Chat application using Redis

How the data is stored:

User data is stored in a hash set where each user entry contains the following values:

    • username: unique user name;

    • password: hashed password

• Additionally, a set of chat rooms is associated with each user

• Rooms are sorted sets which contain messages, where the score is the timestamp for each message

    • Each chat room has a name associated with it

    • The "online" set is global for all users is used for keeping track on which user is online.

• Each user's hash is accessed by the key user:{userId}. The data is stored with HSET key field data. The user ID is calculated by incrementing the total_users key (INCR total_users)

• Usernames are stored as separate keys (username:{username}) which map to the userId for quicker access, stored with SET username:{username} {userId}.

    • Rooms which a user belongs to are stored at user:{userId}:rooms as a set of chat room ids. A room is added by SADD user:{userId}:rooms {roomId} command.

    • Messages are stored at room:{roomId} key in a sorted set (as mentioned above). They are added with the ZADD room:{roomId} {timestamp} {message} command. Messages are serialized to an app-specific JSON string.
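Putting those bullets together, creating a user and posting a message might look roughly like this; the IDs, names, and message payload are illustrative:

 INCR total_users
 // 4, the new user's ID
 HSET user:4 username "alice" password "<bcrypt-hash>"
 SET username:alice 4
 SADD user:4:rooms "1"
 ZADD room:1 1615480369 '{"from": 4, "message": "Hello"}'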

How the data is accessed:

• Get user: HGETALL user:{id}. Example:

     HGETALL user:2

    where we get data for the user with id: 2.

• Online users: SMEMBERS online_users. This will return the IDs of users who are online

    • Get room ids of a user: SMEMBERS user:{id}:rooms. Example:

     SMEMBERS user:2:rooms

    This will return IDs of chat rooms for user with ID: 2

    • Get list of messages ZREVRANGE room:{roomId} {offset_start} {offset_end}. Example:
     ZREVRANGE room:1:2 0 50

It will return the 50 most recent messages, starting at offset 0, for the private room between users with IDs 1 and 2.

How to build a Fraud Detection System using Redis

Using zcount, we find the number of clicks from a device in a certain pre-configured window. If the count received is greater than a certain threshold, we identify it as anomalous.

Finally, data is pushed to a Redis Stream using the xadd command. id=’*’ tells Redis Streams to generate a unique ID for our message.

    Registering Gears:

When the app starts, a gear is registered which reacts to the stream that we use to push data.

Gist: https://gist.github.com/Sachin-Kottarathodi/f9dac7a3342a3643e792e2143a6adf7d

 import json

import redis
from gearsclient import GearsRemoteBuilder as GearsBuilder
from redistimeseries.client import Client

# Assumed for illustration: a local Redis connection used to register the gear.
redis_conn = redis.Redis()

def stream_handler(item):
    data = item['value']
    member = json.dumps(
        {'device_id': data['device_id'],
         'transaction_id': data['transaction_id'],
         'ts': data['ts']})
    # Record the click in a per-device sorted set (read later by zcount for click-spam checks)
    redis.Redis().zadd(data.get('device_id'), {member: data['ts']})
    # Increment the time series counter for the clean/fraud type (used for visualization)
    Client().incrby(data['fraud_type'], 1)

GearsBuilder(reader='StreamReader', r=redis_conn, requirements=["redis", "redistimeseries"]).foreach(stream_handler).register('data_stream')

    As mentioned before, since RedisGears and Redis Time Series are modules, we need to use the clients provided in their respective packages.

    We use the GearsRemoteBuilder class to build the Gear. StreamReader ensures that the stream_handler function is executed for every new message from the stream. The stream_handler adds the data to the sorted set using zadd (This information is used in zcount to identify click_spam) and increments the count of time series for clean and fraud types using incrby of the Redis Time Series module, which is later used for visualization.

    Fraud Detection

    Gear registration can be checked on RedisInsight as well.

Finally, we incorporate the Flask app, which exposes the endpoint for triggering fraud checks.

    Gist: https://gist.github.com/Sachin-Kottarathodi/2a6cccb29b4a9fdc7d58086af07aa6eb

 from flask import Flask, request
from fraud_checks import FraudChecks
from setup import Setup

app = Flask(__name__)


@app.route('/', methods=['POST'])
def check_fraud():
    try:
        response = FraudChecks().check_fraud(request.get_json())
        code = 200
    except Exception as e:
        print("Error occurred ", e)
        response = str(e)
        code = 500

    return response, code


if __name__ == '__main__':
    Setup().init()
    app.run(port=5000, debug=False, host='0.0.0.0')

Here, the app is exposed on port 5000. Before starting the server, the init method of Setup is called to register the gear. The endpoint calls the function that does the fraud checks and returns the response.

The application is written in Python and exposes an endpoint which accepts a few parameters. Use the below command to invoke the application:

     $ curl --request POST 'localhost:5000' --header 'Content-Type: application/json' --data-raw '{
    "device_id": "111-000-000",
    "ip": "1.1.1.1",
    "transaction_id": "3e4fad5fs"}'
    clean

Since initially no data is available in the Cuckoo Filter, all IPs will be allowed through. To add data to the Cuckoo Filter, connect to Redis using the CLI and run the command

     cf.addnx ip_cf 1.1.1.1

    Run the post command with this IP again. This time, the result will be ip_blacklist.

    Fraud Detection

    Click Spamming:

    The app is configured to allow two events in a window of 10 seconds from the same device. To verify, make more than two curl requests within 10 seconds and the result will be click_spam.
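For example, firing three requests back to back from the shell should trip the threshold; the IP below is assumed not to be in the blacklist filter:

 for i in 1 2 3; do
   curl --request POST 'localhost:5000' --header 'Content-Type: application/json' \
     --data-raw '{"device_id": "111-000-000", "ip": "2.2.2.2", "transaction_id": "tx-'$i'"}'
 done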

    Fraud Detection

    Optional: The following variables can be configured during the ‘docker run’ command. -e CLICK_SPAM_THRESHOLD=3 -e CLICK_SPAM_WINDOW_IN_SEC=10

    Step #6: Deploy Grafana

    It’s exciting to see the fraud detection plotted in Grafana. To implement this, run the command below:

     $ docker run -d -e "GF_INSTALL_PLUGINS=redis-app" -p 3000:3000 grafana/grafana

Point your browser to http://<IP_ADDRESS>:3000.

    Fraud detection

Log in as ‘admin’ with password ‘admin’; you can reset the password after your first login.

    Fraud detection

    Click on the gear icon on the left panel (Configuration) and choose Data Sources.

    Fraud detection

    Choose ‘Add data source’.

    Fraud detection

    Search for Redis and choose Redis Data Source.

    Fraud detection

    Copy and paste the raw json content from here in the ‘Import via panel json’ box. Click on Load.

    Fraud detection

    This creates a dashboard ‘Fraud Stats’. If you get an error while importing the dashboard, try changing the name and UUID of the dashboard.

    Fraud detection

    Fraud detection

    Conclusion & future work

• If we consider the entire flow, from fraud check through event streaming, data processing, and visualization (using insights), all of this would normally have required multiple components and extensive orchestration. With the Redis ecosystem, most of this is removed.
• This is just the beginning of the checks that can be done on events. Many other checks can be done using modules and data structures. For example, Redis provides geospatial data structures built over sorted sets. Since latitude and longitude can be derived from an IP address using IP-to-location conversion providers, a lot of insight can be derived as to whether an event is fraudulent or not.
• To reject servicing requests altogether, the redis-cell module can be used to rate limit requests against a key.
How to build a HackerNews Clone using Redis

It was developed as a project of Graham's company Y Combinator, functioning as a real-world application of the Arc programming language, which Graham co-developed.

This is a HackerNews clone built with React and NextJS on the frontend, and NodeJS, ExpressJS & Redis on the backend. This application uses Redis JSON for storing the data and Search in Redis Stack for searching.

    hackernews

    Step 1. Install the prerequisites

    Install the below packages

    • NPM v7.8.0
    • NODE v15.10.0

    Step 2. Create Redis Enterprise Cloud database

Redis is an open source, in-memory, key-value data store most commonly used as a primary database, cache, message broker, and queue. Redis is popular among developers as it delivers sub-millisecond response times, enabling fast and powerful real-time applications in industries such as gaming, fintech, ad-tech, social media, healthcare, and IoT.

Redis Cloud is a fully-managed cloud service for hosting and running your Redis dataset in a highly-available and scalable manner, with predictable and stable top performance. Redis Enterprise Cloud allows you to run a Redis server in the cloud and access the instance in multiple ways, such as RedisInsight, the Redis command line, and client tools. You can quickly and easily get your apps up and running with Redis Cloud through its Redis Heroku add-ons: just tell us how much memory you need and get started instantly with your first Redis database. You can then add more Redis databases (each running in a dedicated process, in a non-blocking manner) and increase or decrease the memory size of your plan without affecting your existing data.

    Follow this link to create a Redis Enterprise Cloud account with 2 databases with Redis Stack.

Save the database endpoint URL and password for future reference.

    Step 3. Clone the repository

     git clone https://github.com/redis-developer/redis-hacker-news-demo
    cd redis-hacker-news-demo

    Step 4. Setting up environment variables

    Copy .env.sample to .env and provide the values as shown below:

     MAILGUN_API_KEY=YOUR_VALUE_HERE
    SEARCH_REDIS_SERVER_URL=redis://redis-XXXXX.c10.us-east-1-2.ec2.cloud.redislabs.com:10292
    SEARCH_REDIS_PASSWORD=ABCDXYZbPXHWsC
    JSON_REDIS_SERVER_URL=redis://redis-XXXXX.c14.us-east-1-2.ec2.cloud.redislabs.com:14054
    JSON_REDIS_PASSWORD=ABCDXYZA3tzw2XYMPi2P8UPm19D
    LOG_LEVEL=1
    USE_REDIS=1
    REDIS_REINDEX=
    PRODUCTION_WEBSITE_URL=i

    Step 5. Run the developer environment

     npm install
    npm run dev

    Step 6. Pull Hacker News API to seed database

Using the Hacker News API, it pulls the latest Hacker News data. Next, you need to seed top stories from Hacker News. First create a moderator with the credentials moderator:password123:

     node ./backend/scripts/seed.js

    Step 7. Access the HackerNews URL

Open http://localhost:3001 and you should be able to access the HackerNews login screen as shown below:

    hackernews

    How it works

    By Screens

    Signup

    Signup Screen

• Make sure the user (where username is andy1) does not exist.
     FT.SEARCH idx:user @username:"andy1" NOCONTENT LIMIT 0 1 SORTBY _id DESC
    • Get and increase the next id in users collection.
     GET user:id-indicator // 63
    INCR user:id-indicator // 64 will be next user id, 63 is current user id
• Create the user:63 hash and JSON document. (The JSON document also stores authToken, the password hash, etc.)
      HSET user:63 username andy1 email  created 1615569194 karma 0 about  showDead false isModerator false shadowBanned false banned false _id 63
  JSON.SET user:63 . '{"username":"andy1","password":"$2a$10$zy8tsCske8MfmDX5CcWMce5S1U7PJbPI7CfaqQ7Bo1PORDeqJxqhe","authToken":"AAV07FIwTiEkNrPj0x1yj6BPJQSGIPzV0sICw2u0","authTokenExpiration":1647105194,"email":"","created":1615569194,"karma":0,"showDead":false,"isModerator":false,"shadowBanned":false,"banned":false,"_id":63}'

    Login

    Login Screen

    • Find user
     FT.SEARCH idx:user  @username:"andy1" NOCONTENT LIMIT 0 1 SORTBY _id DESC
    • Make sure password is correct
     JSON.MGET user:63 .
    • Compare password and new password hash and create cookie if it's successful

    Item list page

    Newest Screen

    • Check if user has toggled hidden attribute on a specific item.
     FT.SEARCH idx:user-hidden  @username:"andy1" NOCONTENT LIMIT 0 10000 SORTBY _id DESC
    // Result - [0, "item:4"]
    • If that is not null
     FT.SEARCH idx:item  (-(@id:"item:4")) (@dead:"false") NOCONTENT LIMIT 0 30 SORTBY _id ASC
• If it's an empty array
     FT.SEARCH idx:item (@dead:"false") NOCONTENT LIMIT 0 30 SORTBY _id ASC
    // Result - [3,"item:1","item:2","item:3"]
    • Get all items from Redis using JSON.MGET
     JSON.MGET item:1 item:2 item:3 .
    // Result - [{"id":"bkWCjcyJu5WT","by":"todsacerdoti","title":"Total Cookie
    Protection","type":"news","url":"https://blog.mozilla.org/security/2021/02/23/total-cookie-
    protection/","domain":"mozilla.org","points":1,"score":1514,"commentCount":0,"created":1614089461,"dead":false,"_id":3}]]
    • Get items posted within last 1 week
     FT.SEARCH idx:item  (@created:[(1615652598 +inf]) (@dead:"false") NOCONTENT LIMIT 0 0 SORTBY _id DESC
    // Result - [13,"item:19","item:17","item:16","item:15","item:14","item:13","item:12","item:11","item:8","item:5","item:4","item:3","item:1"]
    note

In this case, 1615652598 is the timestamp of 1 week earlier than the current timestamp

     JSON.MGET item:19 item:17 item:16 item:15 item:14 item:13 item:12 item:11 item:8 item:5 item:4 item:3 item:1 .
    // Result - the JSON of selected items

    Item Detail

    Item Detail Screen

    • Get the item object first
     JSON.MGET item:1 .
• Find item:1's root comments
     FT.SEARCH idx:comment  (@parentItemId:"kDiN0RhTivmJ") (@isParent:"true") (@dead:"false") NOCONTENT LIMIT 0 30 SORTBY points ASC
    // Result - [3,"comment:1","comment:2","comment:12"]
    • Get those comments
     JSON.MGET comment:1 comment:2 comment:12 .
    // one comment example result - {"id":"jnGWS8TTOecC","by":"ploxiln","parentItemId":"kDiN0RhTivmJ","parentItemTitle":"The Framework
    Laptop","isParent":true,"parentCommentId":"","children":[13,17,20],"text":"I don&#x27;t see any mention of the firmware and drivers efforts for this.
    Firmware and drivers always end up more difficult to deal with than expected.<p>The Fairphone company was surprised by difficulties upgrading and
    patching android without support from their BSP vendor, causing many months delays of updates _and_ years shorter support life than they were
    planning for their earlier models.<p>I purchased the Purism Librem 13 laptop from their kickstarter, and they had great plans for firmware and
    drivers, but also great difficulty following through. The trackpad chosen for the first models took much longer than expected to get upstream linux
    support, and it was never great (it turned out to be impossible to reliably detect their variant automatically). They finally hired someone with
    sufficient skill to do the coreboot port _months_ after initial units were delivered, and delivered polished coreboot firmware for their initial
    laptops _years_ after they started the kickstarter.<p>So, why should we have confidence in the firmware and drivers that Framework will deliver
    :)","points":1,"created":1614274058,"dead":false,"_id":12}
• Using the children of each comment, fetch the child comments
     FT.SEARCH idx:comment  (@dead:"false") (@_id:("3"|"7"|"11")) NOCONTENT LIMIT 0 10000 SORTBY _id DESC
• Iterate until all comments are resolved

    Submit

    Submit Screen

    • Get next item's id and increase it
     GET item:id-indicator
    // Result - 4
    SET item:id-indicator 5
    • Create hash and index
 HSET item:4 id iBi8sU4HRcZ2 by andy1 title "Firebase trends" type ask url "" domain "" text "Firebase Performance Monitoring is a service that helps you to gain insight into the performance characteristics of your iOS, Android, and web apps." points 1 score 0 created 1615571392 dead false _id 4
 JSON.SET item:4 . '{"id":"iBi8sU4HRcZ2","by":"andy1","title":"Firebase trends","type":"ask","url":"","domain":"","text":"Firebase Performance Monitoring is a service that helps you to gain insight into the performance characteristics of your iOS, Android, and web apps.","points":1,"score":0,"commentCount":0,"created":1615571392,"dead":false,"_id":4}'

    Update Profile

    Update Profile Screen

    • Get the user
     FT.SEARCH idx:user  (@username:"andy1") NOCONTENT LIMIT 0 1 SORTBY _id DESC
     JSON.MGET user:63 .
    • Update new user
 HSET user:63 username andy1 email "" created 1615569194 karma 1 about "I am a software engineer." showDead false isModerator false shadowBanned false banned false _id 63
 JSON.SET user:63 . '{"username":"andy1","password":"$2a$10$zy8tsCske8MfmDX5CcWMce5S1U7PJbPI7CfaqQ7Bo1PORDeqJxqhe","authToken":"KJwPLN1idyQrMp5qEY5hR3VhoPFTKRcC8Npxxoju","authTokenExpiration":1647106257,"email":"","created":1615569194,"karma":1,"about":"I am a software engineer.","showDead":false,"isModerator":false,"shadowBanned":false,"banned":false,"_id":63}'

    Moderation Logs screen

    Moderation Logs

    • Find all moderation logs
     FT.SEARCH idx:moderation-log * NOCONTENT LIMIT 0 0 SORTBY _id DESC
    // Result - [1,"moderation-log:1"]
• Get those moderation logs
     JSON.MGET moderation-log:1 .

    Search Screen

• Get items whose title contains "fa"
     FT.SEARCH idx:item  (@title:fa*) (-(@id:"aaaaaaaaa")) (@dead:"false") NOCONTENT LIMIT 0 30 SORTBY score ASC
    // Result - [2,"item:18","item:16"]
    • Get those items via json
     JSON.MGET item:18 item:16 .

    Example commands

There are 2 types of fields: indexed and non-indexed.

    1. Indexed fields will be stored in hash using HSET/HGET.
    2. Non-indexed fields will be stored in JSON.
    • Create an index

When the schema is created, it should create the index.

     FT.CREATE idx:user ON hash PREFIX 1 "user:" SCHEMA username TEXT SORTABLE email TEXT SORTABLE karma NUMERIC SORTABLE
    • Drop search index

Drop/update the index if the schema has changed

     FT.DROPINDEX idx:user
    • Get search info

Validate that the fields are indexed properly. If not, update the index fields or drop/recreate the index.

     FT.INFO idx:user
    • Create a new user

It requires a new hash and a new JSON record

     HSET user:andy username "andy" email "andy@gmail.com" karma 0
 JSON.SET user:andy . '{"password": "hashed_password", "settings": "{ \"showDead\": true }" }'
    • Update a user
     HSET user:1 username "newusername"
 JSON.SET user:andy .username '"newusername"'
    • Find user with username 'andy'
    1. Find the user's hash first
     FT.SEARCH idx:user '@username:{andy}'
2. Fetch the related JSON object
     JSON.GET user:andy
    • Find user whose id is andy1 or andy2
     FT.SEARCH idx:user '@id:("andy1"|"andy2")'
    • Find user whose id is not andy1 or andy2
     FT.SEARCH idx:user '(-(@id:("andy1"|"andy2")))'
    • Find user whose id is andy1 or username is andy
     FT.SEARCH idx:user '(@id:"andy1") | (@username:"andy")'
    • Find user whose id is andy1 and username is andy
     FT.SEARCH idx:user '(@id:"andy1") (@username:"andy")'
    • Find first 10 users order by username
     FT.SEARCH idx:user '*' LIMIT 0 10 SORTBY username ASC
    • Find next 10 users
     FT.SEARCH idx:user '*' LIMIT 10 20 SORTBY username ASC
• Get JSON from multiple keys
 JSON.MGET user:andy1 user:andy2 user:andy3 .

    References


    How to build a Java based application on Heroku using Redis


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    Step 1. Create Redis Enterprise Cloud

    Create your free Redis Enterprise Cloud account. Follow this link to create Redis Enterprise Cloud subscription and database as shown below:

    heroku

    Save the database endpoint URL and password for future reference.

    Step 2. Create a Heroku account

    If you are using Heroku for the first time, create your new Heroku account through this link

    heroku

    Step 3. Install Heroku CLI on your system

     brew install heroku

    Step 4. Login to Heroku

     heroku login
    heroku: Press any key to open up the browser to login or q to exit:
    Opening browser to https://cli-auth.heroku.com/auth/cli/browser/XXXXXXXXXXA
    Logging in... done
    Logged in as your_email_address

    Step 5. Connect your application to Redis Enterprise Cloud

    For this demonstration, we will be using a Sample Rate Limiting application

    Clone the repository

     git clone https://github.com/redis-developer/basic-rate-limiting-demo-java
    heroku create
    Creating app... done, ⬢ hidden-woodland-03996
    https://hidden-woodland-03996.herokuapp.com/ | https://git.heroku.com/hidden-woodland-03996.git

    Step 6. Setting up environment variables

Go to the Heroku dashboard, click "Settings" and set REDIS_ENDPOINT_URI and REDIS_PASSWORD under the Config Vars, using the values saved in Step 1.

    heroku

You now have a functioning Git repository that contains a simple application.

    Step 7. Deploy your code

    Heroku generates a random name (in this case hidden-woodland-03996) for your app, or you can pass a parameter to specify your own app name. Now deploy your code:

    $ git push heroku
    remote: BUILD SUCCESSFUL in 1m 5s
    remote: 12 actionable tasks: 12 executed
    remote: -----> Discovering process types
    remote: Procfile declares types -> web
    remote:
    remote: -----> Compressing...
    remote: Done: 298.9M
    remote: -----> Launching...
    remote: Released v3
    remote: https://hidden-woodland-03996.herokuapp.com/ deployed to Heroku
    remote:
    remote: Verifying deploy... done.
    To https://git.heroku.com/hidden-woodland-03996.git
    * [new branch] master -> master

    Step 8. Accessing the application

    Open https://hidden-woodland-03996.herokuapp.com/ to see your application

    heroku

    Next Steps


    How to build a NodeJS based application on Heroku using Redis


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    Step 1. Create Redis Enterprise Cloud

    Create your free Redis Enterprise Cloud account. Follow this link to create Redis Enterprise Cloud subscription and database as shown below:

    heroku

    Save the database endpoint URL and password for future reference.

    Step 2. Create a Heroku account

    If you are using Heroku for the first time, create your new Heroku account through this link

    heroku

    Step 3. Install Heroku CLI on your system

     brew install heroku

    Step 4. Login to Heroku

     heroku login
    heroku: Press any key to open up the browser to login or q to exit:
    Opening browser to https://cli-auth.heroku.com/auth/cli/browser/XXXXXXXXXXA
    Logging in... done
    Logged in as your_email_address

    Step 5. Connect your application to Redis Enterprise Cloud

    For this demonstration, we will be using a Sample Rate Limiting application

    Clone the repository

     git clone https://github.com/redis-developer/basic-redis-rate-limiting-demo-nodejs

Run the below CLI command; you now have a functioning Git repository that contains a simple application as well as a package.json file.

    heroku create
    Creating app... done, ⬢ rocky-lowlands-06306
    https://rocky-lowlands-06306.herokuapp.com/ | https://git.heroku.com/rocky-lowlands-06306.git

    Step 6. Setting up environment variables

Go to the Heroku dashboard, click "Settings" and set REDIS_ENDPOINT_URI and REDIS_PASSWORD under the Config Vars, using the values saved in Step 1.

    heroku

    You now have a functioning Git repository that contains a simple application as well as a package.json file, which is used by Node’s dependency manager.

    Step 7. Deploy your code

    $ git push heroku

Wait for a few seconds and you will see the below messages displayed:

    remote: -----> Launching...
    remote: Released v3
    remote: https://rocky-lowlands-06306.herokuapp.com/ deployed to Heroku
    remote:
    remote: Verifying deploy... done.
    To https://git.heroku.com/rocky-lowlands-06306.git
    * [new branch] main -> main

    Step 8. Accessing the application

    Open https://rocky-lowlands-06306.herokuapp.com/ to see your application

    heroku

    Next Steps


    How to build a Python based application on Heroku using Redis


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    Step 1. Create Redis Enterprise Cloud

    Create your free Redis Enterprise Cloud account. Follow this link to create Redis Enterprise Cloud subscription and database as shown below:

    heroku

    Save the database endpoint URL and password for future reference.

    Step 2. Create a Heroku account

    If you are using Heroku for the first time, create your new Heroku account through this link

    heroku

    Step 3. Install Heroku CLI on your system

     brew install heroku

    Step 4. Login to Heroku

     heroku login
    heroku: Press any key to open up the browser to login or q to exit:
    Opening browser to https://cli-auth.heroku.com/auth/cli/browser/XXXXXXXXXXA
    Logging in... done
    Logged in as your_email_address

    Step 5. Connect your application to Redis Enterprise Cloud

    For this demonstration, we will be using a Sample Rate Limiting application

    Clone the repository

     git clone https://github.com/redis-developer/basic-rate-limiting-demo-python

Run the below CLI command to create the Heroku app:

    $ heroku create
    Creating app... done, ⬢ fast-reef-76278
    https://fast-reef-76278.herokuapp.com/ | https://git.heroku.com/fast-reef-76278.git

    Step 6. Setting up environment variables

Go to the Heroku dashboard, click "Settings" and set REDIS_ENDPOINT_URI and REDIS_PASSWORD under the Config Vars, using the values saved in Step 1.

    heroku

    Step 7. Deploy your code

    Heroku generates a random name (in this case fast-reef-76278) for your app, or you can pass a parameter to specify your own app name. Now deploy your code:

    $ git push heroku
    Enumerating objects: 512, done.
    Counting objects: 100% (512/512), done.
    Delta compression using up to 12 threads
    Compressing objects: 100% (256/256), done.
    Writing objects: 100% (512/512), 1.52 MiB | 660.00 KiB/s, done.
    Total 512 (delta 244), reused 512 (delta 244)
    remote: Compressing source files... done.
    remote: Building source:
    remote:
    remote: -----> Building on the Heroku-20 stack
    remote: -----> Determining which buildpack to use for this app
    remote: -----> Python app detected


remote: -----> Compressing...
    remote: Done: 59.3M
    remote: -----> Launching...
    remote: Released v5
    remote: https://fast-reef-76278.herokuapp.com/ deployed to Heroku
    remote:
    remote: Verifying deploy... done.
    To https://git.heroku.com/fast-reef-76278.git
    * [new branch] master -> master

    Step 8. Accessing the application

    Open https://fast-reef-76278.herokuapp.com/ to see your application

    heroku

    Next Steps


    HowTos & Tutorials

    HowTos

    This page holds a catalog with dozens of ready-to-use app listings from Redis. Now it is easy to build, test and deploy software that runs using Redis.

    How to build a Rate Limiter using Redis
    Implementing a Shopping Cart using NodeJS & Redis
    How to search and list Movies database using Redis
    Building a Real-Time Leaderboard using Redis
    Building Fraud Detection using Redis
    How to cache REST API responses Using Redis & NodeJS
    Building a Chat application using Redis
    How to build an Analytics dashboard app using Redis
    How to build a Popup Store using Redis features like Triggers/ Functions and Time Series data model
    Building a HackerNews Clone using Redis JSON, Search and Query features
    How to build a pipeline for Natural Language Processing using Redis Triggers and Functions
    How to build Movies database (Graph) using Redis & NodeJS
    How to Build a Social Network Application using Redis and NodeJS
    Using SSL/TLS with Redis Enterprise

    Tutorials

    Getting Started with Redis Search and Query features
    Getting Started with Redis JSON feature
    Getting Started with Redis Time Series data model
    Getting Started with Redis Graph
    Getting Started with Redis Probabilistic data model
    Getting Started with Redis Triggers and Functions
    How to build Python based Rate Limiting application on Heroku using Redis
    How to build NodeJS based Rate Limiting application on Heroku using Redis
    How to build Java based application on Heroku using Redis

    How to build a Real-Time Leaderboard app Using Redis


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    The concept of a leaderboard—a scoreboard showing the ranked names and current scores (or other data points) of the leading competitors—is essential to the world of computer gaming, but leaderboards are now about more than just games. They are about gamification, a broader implementation that can include any group of people with a common goal (coworkers, students, sales groups, fitness groups, volunteers, and so on).

    Leaderboards can encourage healthy competition in a group by openly displaying the current ranking of each group member. They also provide a clear way to view the ongoing achievements of the entire team as members move towards a goal. Gamification of tasks and goals via leaderboards is a great way to motivate people by providing them with constant feedback of where they rank in comparison to other group members. Done well, this can lead to healthy competition that builds group cohesion.

    My Image

    Step 1. Install the below software

    Step 2. Clone the repository

    git clone https://github.com/redis-developer/basic-redis-leaderboard-demo-java

    Step 3. Run docker compose

    docker network create global
    docker-compose up -d --build

    Step 4. Verifying if containers are up and running

     docker-compose ps
    Name Command State Ports
    --------------------------------------------------------------------------------------------------
    redis.redisleaderboard.docker docker-entrypoint.sh redis ... Up 127.0.0.1:55000->6379/tcp

    Step 5. Copy .env.example to create .env

    Provide the values for environment variables (if needed)

    - REDIS_URL: Redis database endpoint URL
    - REDIS_HOST: Redis server host
    - REDIS_PORT: Redis server port
    - REDIS_DB: Redis server db index
    - REDIS_PASSWORD: Redis server password

If you're using Redis Enterprise Cloud, you must supply the DB endpoint, password, port, and the name of the database. For a local system, the entries look as shown below:

    REDIS_URL=
    REDIS_HOST=redis://localhost
    REDIS_PORT=6379
    REDIS_PASSWORD=
    REDIS_DB=

    Step 6. Run the backend

    • Install gradle

Follow the instructions at https://gradle.org/install/. On macOS:

    brew install gradle
    • Install JDK

Follow the instructions at https://docs.oracle.com/javase/10/install/installation-jdk-and-jre-macos.htm for macOS, then load the environment variables from .env:

    export $(cat .env | xargs)

    Step 7. Run the wrapper task

To use the Gradle Wrapper, we need to generate some particular files. We'll generate these files using the built-in Gradle task called wrapper. Note that we need to generate these files only once.

    Now, let's run the wrapper task in our project directory:

    gradle wrapper

    It should show the below results:

    Welcome to Gradle 6.8.3!

    Here are the highlights of this release:
    - Faster Kotlin DSL script compilation
    - Vendor selection for Java toolchains
    - Convenient execution of tasks in composite builds
    - Consistent dependency resolution

    For more details see https://docs.gradle.org/6.8.3/release-notes.html

    Starting a Gradle Daemon (subsequent builds will be faster)

    BUILD SUCCESSFUL in 29s
    1 actionable task: 1 executed

    Step 8. Perform the build task

    The Gradle Wrapper is now available for building your project. It's time to run the wrapper script to perform the build task.

    ./gradlew build
    % ./gradlew build
    Downloading https://services.gradle.org/distributions/gradle-6.8.3-bin.zip
    ..........10%..........20%..........30%...........40%..........50%..........60%..........70%...........80%..........90%..........100%
    Starting a Gradle Daemon, 1 incompatible Daemon could not be reused, use --status for details

    > Task :test
    2021-03-01 07:08:42.962 INFO 3624 --- [extShutdownHook] o.s.s.concurrent.ThreadPoolTaskExecutor : Shutting down ExecutorService 'applicationTaskExecutor'

    BUILD SUCCESSFUL in 1m 13s
    12 actionable tasks: 12 executed

    Step 9. Run your application

    ./gradlew run
    > Task :run

    . ____ _ __ _ _
    /\\ / ___'_ __ _ _(_)_ __ __ _ \ \ \ \
    ( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \
    \\/ ___)| |_)| | | | | || (_| | ) ) ) )
    ' |____| .__|_| |_|_| |_\__, | / / / /
    =========|_|==============|___/=/_/_/_/
    :: Spring Boot :: (v2.4.1)

    2021-03-01 07:09:59.610 INFO 3672 --- [ restartedMain] BasicRedisLeaderLoardDemoJavaApplication : Starting BasicRedisLeaderLoardDemoJavaApplication using Java 13.0.2 on Ajeets-MacBook-Pro.local with PID 3672 (/Users/ajeetraina/projects/basic-redis-leaderboard-demo-java/build/classes/java/main started by ajeetraina in /Users/ajeetraina/projects/basic-redis-leaderboard-demo-java)
    2021-03-01 07:09:59.614 INFO 3672 --- [ restartedMain] BasicRedisLeaderLoardDemoJavaApplication : No active profile set, falling back to default profiles: default
    2021-03-01 07:09:59.661 INFO 3672 --- [ restartedMain] .e.DevToolsPropertyDefaultsPostProcessor : Devtools property defaults active! Set 'spring.devtools.add-properties' to 'false' to disable
    2021-03-01 07:09:59.661 INFO 3672 --- [ restartedMain] .e.DevToolsPropertyDefaultsPostProcessor : For additional web related logging consider setting the 'logging.level.web' property to 'DEBUG'
    2021-03-01 07:10:00.481 INFO 3672 --- [ restartedMain] o.s.b.w.embedded.tomcat.TomcatWebServer : Tomcat initialized with port(s): 5000 (http)
    2021-03-01 07:10:00.492 INFO 3672 --- [ restartedMain] o.apache.catalina.core.StandardService : Starting service [Tomcat]
    2021-03-01 07:10:00.492 INFO 3672 --- [ restartedMain] org.apache.catalina.core.StandardEngine : Starting Servlet engine: [Apache Tomcat/9.0.41]
    2021-03-01 07:10:00.551 INFO 3672 --- [ restartedMain] o.a.c.c.C.[Tomcat].[localhost].[/] : Initializing Spring embedded WebApplicationContext
    2021-03-01 07:10:00.551 INFO 3672 --- [ restartedMain] w.s.c.ServletWebServerApplicationContext : Root WebApplicationContext: initialization completed in 889 ms
    2021-03-01 07:10:00.756 INFO 3672 --- [ restartedMain] o.s.s.concurrent.ThreadPoolTaskExecutor : Initializing ExecutorService 'applicationTaskExecutor'
    2021-03-01 07:10:00.845 INFO 3672 --- [ restartedMain] o.s.b.a.w.s.WelcomePageHandlerMapping : Adding welcome page: URL [file:/Users/ajeetraina/projects/basic-redis-leaderboard-demo-java/assets/index.html]
    2021-03-01 07:10:00.949 INFO 3672 --- [ restartedMain] .s.s.UserDetailsServiceAutoConfiguration :

    Using generated security password: ea2d5326-b04c-4f93-b771-57bcb53f656e

    2021-03-01 07:10:01.016 INFO 3672 --- [ restartedMain] o.s.s.web.DefaultSecurityFilterChain : Will secure any request with [org.springframework.security.web.context.request.async.WebAsyncManagerIntegrationFilter@583fa06c, org.springframework.security.web.context.SecurityContextPersistenceFilter@524c0386, org.springframework.security.web.header.HeaderWriterFilter@c6e5d4e, org.springframework.security.web.authentication.logout.LogoutFilter@3e1f33e9, org.springframework.security.web.savedrequest.RequestCacheAwareFilter@6790427f, org.springframework.security.web.servletapi.SecurityContextHolderAwareRequestFilter@40ddf86, org.springframework.security.web.authentication.AnonymousAuthenticationFilter@1412ffa9, org.springframework.security.web.session.SessionManagementFilter@3eb6c20f, org.springframework.security.web.access.ExceptionTranslationFilter@21646e94, org.springframework.security.web.access.intercept.FilterSecurityInterceptor@649e1b25]
    2021-03-01 07:10:01.043 INFO 3672 --- [ restartedMain] o.s.b.d.a.OptionalLiveReloadServer : LiveReload server is running on port 35729
    2021-03-01 07:10:01.065 INFO 3672 --- [ restartedMain] o.s.b.w.embedded.tomcat.TomcatWebServer : Tomcat started on port(s): 5000 (http) with context path ''
    2021-03-01 07:10:01.093 INFO 3672 --- [ restartedMain] BasicRedisLeaderLoardDemoJavaApplication : Started BasicRedisLeaderLoardDemoJavaApplication in 1.937 seconds (JVM running for 2.327)
    <=========----> 75% EXECUTING [17s]
    > :run

    Step 10. Access the leaderboard application

    My Image

How it works

    How the data is stored:

• AAPL's details - a market cap of 2.6 trillion and USA origin - are stored in a hash as shown below:

       HSET "company:AAPL" symbol "AAPL" market_cap "2600000000000" country USA
• AAPL's rank, with its 2.6 trillion market cap as the score, is stored in a ZSET.

       ZADD  companyLeaderboard 2600000000000 company:AAPL

    How the data is accessed:

    • Top 10 companies:

       ZREVRANGE companyLeaderboard 0 9 WITHSCORES
    • All companies:

       ZREVRANGE companyLeaderboard 0 -1 WITHSCORES
    • Bottom 10 companies:

       ZRANGE companyLeaderboard 0 9 WITHSCORES
    • Between rank 10 and 15:

       ZREVRANGE companyLeaderboard 9 14 WITHSCORES
    • Show ranks of AAPL, FB and TSLA:

   ZREVRANK companyLeaderBoard company:AAPL
   ZREVRANK companyLeaderBoard company:FB
   ZREVRANK companyLeaderBoard company:TSLA
• Adding 1 billion to the market cap of FB:

       ZINCRBY companyLeaderBoard 1000000000 "company:FB"
• Reducing the market cap of FB by 1 billion:

       ZINCRBY companyLeaderBoard -1000000000 "company:FB"
    • Companies between 500 billion and 1 trillion:

       ZCOUNT companyLeaderBoard 500000000000 1000000000000
    • Companies over a Trillion:

       ZCOUNT companyLeaderBoard 1000000000000 +inf

    References


    9. Advanced Option

    Create an index using a Filter

    In the previous examples, the indices were created using a PREFIX, where all the keys matching the type and prefix are indexed.

It is also possible to create an index using a filter; for example, an index of all the "Drama" movies released between 1990 and 2000 (2000 not included).

The FILTER expression uses the aggregation filter syntax; for example, for the genre and release year it will be

    • FILTER "@genre=='Drama' && @release_year>=1990 && @release_year<2000"

    So when you create the index:

    FT.CREATE idx:drama ON Hash PREFIX 1 "movie:" FILTER "@genre=='Drama' && @release_year>=1990 && @release_year<2000" SCHEMA title TEXT SORTABLE release_year NUMERIC SORTABLE

    You can run the FT.INFO idx:drama command to look at the index definitions and statistics.
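
    For instance, the reply begins like this (truncated; the index definition, fields and statistics follow):

    > FT.INFO idx:drama

    1) "index_name"
    2) "idx:drama"
    ...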

    Notes

    • The PREFIX is not optional.
    • In this application this index is not that useful, since you can get the same data from the idx:movie index.

    You can check that the data has been indexed by running the following queries that should return the same number of documents.

    On idx:drama

    > FT.SEARCH idx:drama "  @release_year:[1990 (2000]" LIMIT 0 0

    1) (integer) 24

    On idx:movie

    > FT.SEARCH idx:movie "@genre:{Drama}  @release_year:[1990 (2000]" LIMIT 0 0

    1) (integer) 24
    - + \ No newline at end of file diff --git a/howtos/moviesdatabase/aggregation/index.html b/howtos/moviesdatabase/aggregation/index.html index fcfd257a59..cfa903f73e 100644 --- a/howtos/moviesdatabase/aggregation/index.html +++ b/howtos/moviesdatabase/aggregation/index.html @@ -4,7 +4,7 @@ 8. Aggregations | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    8. Aggregations

    In addition to retrieving information as a document list, as you have done with the FT.SEARCH command, applications commonly need to do some aggregation.

    For example, looking at the movie documents, you may want to retrieve the number of movies grouped by release year, starting with the most recent ones.

    For this, Redis Stack provides the FT.AGGREGATE command, with aggregations described as a data processing pipeline.

    Let's check out some examples.

    Group By & Sort By

    Number of movies by year
    > FT.AGGREGATE "idx:movie" "*" GROUPBY 1 @release_year REDUCE COUNT 0 AS nb_of_movies

    1) (integer) 60
    2) 1) "release_year"
       2) "1964"
       3) "nb_of_movies"
       4) "9"
    ...
    61) 1) "release_year"
        2) "2010"
        3) "nb_of_movies"
        4) "15"

    Number of movies by year from the most recent to the oldest
    > FT.AGGREGATE "idx:movie" "*" GROUPBY 1 @release_year REDUCE COUNT 0 AS nb_of_movies SORTBY 2 @release_year DESC

    1) (integer) 60
    2) 1) "release_year"
       2) "2019"
       3) "nb_of_movies"
       4) "14"
    ...
    11) 1) "release_year"
        2) "2010"
        3) "nb_of_movies"
        4) "15"

    Number of movies by genre, with the total number of votes, and average rating
    > FT.AGGREGATE idx:movie "*" GROUPBY 1 @genre REDUCE COUNT 0 AS nb_of_movies REDUCE SUM 1 votes AS nb_of_votes REDUCE AVG 1 rating AS avg_rating SORTBY 4 @avg_rating DESC @nb_of_votes DESC


    1) (integer) 26
    2) 1) "genre"
       2) "fantasy"
       3) "nb_of_movies"
       4) "1"
       5) "nb_of_votes"
       6) "1500090"
       7) "avg_rating"
       8) "8.8"
    ...
    11) 1) "genre"
        2) "romance"
        3) "nb_of_movies"
        4) "2"
        5) "nb_of_votes"
        6) "746"
        7) "avg_rating"
        8) "6.65"

    Count the number of female users by country, sorted from the largest to the smallest number
    > FT.AGGREGATE idx:user "@gender:{female}" GROUPBY 1 @country REDUCE COUNT 0 AS nb_of_users SORTBY 2 @nb_of_users DESC

    1) (integer) 193
    2) 1) "country"
       2) "china"
       3) "nb_of_users"
       4) "537"
    ...
    11) 1) "country"
        2) "ukraine"
        3) "nb_of_users"
        4) "72"

    Apply Functions

    Number of logins per year and month

    The idx:user index contains the last_login field. This field stores the last login time as an EPOCH timestamp.

    Redis Stack search aggregation allows you to apply transformations to each record. This is done using the APPLY parameter.

    For this example you have to use a date/time function to extract the month and year from the timestamp.

    > FT.AGGREGATE idx:user * APPLY year(@last_login) AS year APPLY "monthofyear(@last_login) + 1" AS month GROUPBY 2 @year @month REDUCE count 0 AS num_login SORTBY 4 @year ASC @month ASC

    1) (integer) 13
    2) 1) "year"
       2) "2019"
       3) "month"
       4) "9"
       5) "num_login"
       6) "230"
    ...
    14) 1) "year"
        2) "2020"
        3) "month"
        4) "9"
        5) "num_login"
        6) "271"


    Number of logins per weekday

    Using the date/time Apply functions it is possible to extract the day of the week from the timestamp, so let's see how the logins are distributed over the week.

    > FT.AGGREGATE idx:user * APPLY "dayofweek(@last_login) +1" AS dayofweek GROUPBY 1 @dayofweek REDUCE count 0 AS num_login SORTBY 2 @dayofweek ASC

    1) (integer) 7
    2) 1) "dayofweek"
       2) "1"
       3) "num_login"
       4) "815"
    ...
    8) 1) "dayofweek"
       2) "7"
       3) "num_login"
       4) "906"


    Filter

    In the previous examples you used the query string parameter to select all documents ("*") or a subset of the documents ("@gender:{female}").

    It is also possible to filter the results using a predicate expression relating to values in each result. This is applied post-query and relates to the current state of the pipeline. This is done using the FILTER parameter.

    Count the number of female users by country, except China, with more than 100 users, sorted from the largest to the smallest number
    > FT.AGGREGATE idx:user "@gender:{female}" GROUPBY 1 @country  REDUCE COUNT 0 AS nb_of_users  FILTER "@country!='china' && @nb_of_users > 100" SORTBY 2 @nb_of_users DESC

    1) (integer) 163
    2) 1) "country"
       2) "indonesia"
       3) "nb_of_users"
       4) "309"
    ...
    6) 1) "country"
       2) "brazil"
       3) "nb_of_users"
       4) "108"

    Number of logins per month, for the year 2020

    This is similar to the previous query with the addition of a filter on the year.

    > FT.AGGREGATE idx:user * APPLY year(@last_login) AS year APPLY "monthofyear(@last_login) + 1" AS month GROUPBY 2 @year @month REDUCE count 0 AS num_login  FILTER "@year==2020" SORTBY 2 @month ASC

    1) (integer) 13
    2) 1) "year"
       2) "2020"
       3) "month"
       4) "1"
       5) "num_login"
       6) "520"
    ...
    10) 1) "year"
        2) "2020"
        3) "month"
        4) "9"
        5) "num_login"
        6) "271"


    - + \ No newline at end of file diff --git a/howtos/moviesdatabase/create/index.html b/howtos/moviesdatabase/create/index.html index 03aa7ba5da..57e09fa435 100644 --- a/howtos/moviesdatabase/create/index.html +++ b/howtos/moviesdatabase/create/index.html @@ -4,7 +4,7 @@ 3. Create Index | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    3. Create Index

    Before creating the index, let's describe the dataset and insert some entries.

    Sample Dataset

    In this project you will use a simple dataset describing movies; for now, all records are in English. You will learn more about other languages in another tutorial.

    A movie is represented by the following attributes:

    • movie_id : The unique ID of the movie, internal to this database
    • title : The title of the movie.
    • plot : A summary of the movie.
    • genre : The genre of the movie, for now a movie will only have a single genre.
    • release_year : The year the movie was released as a numerical value.
    • rating : A numeric value representing the public's rating for this movie.
    • votes : Number of votes.
    • poster : Link to the movie poster.
    • imdb_id : id of the movie in the IMDB database.

    Key and Data structure

    As a Redis developer, one of the first things to look at when building your application is the structure of the keys and data (data design/data modeling).

    A common way of defining keys in Redis is to use specific patterns. For example, in this application, where the database will probably deal with various business objects - movies, actors, theaters, users, ... - we can use the following pattern:

    • business_object:key

    For example:

    • movie:001 for the movie with the id 001
    • user:001 the user with the id 001

    and for the movie information you should use a Redis Hash.

    A Redis Hash allows the application to structure all the movie attributes as individual fields; Redis Stack will also index the fields based on the index definition.

    Insert Movies

    It is now time to add some data to your database: let's insert a few movies using redis-cli or RedisInsight.

    Once you are connected to your Redis instance run the following commands:


    > HSET movie:11002 title "Star Wars: Episode V - The Empire Strikes Back" plot "After the Rebels are brutally overpowered by the Empire on the ice planet Hoth, Luke Skywalker begins Jedi training with Yoda, while his friends are pursued by Darth Vader and a bounty hunter named Boba Fett all over the galaxy." release_year 1980 genre "Action" rating 8.7 votes 1127635 imdb_id tt0080684

    > HSET movie:11003 title "The Godfather" plot "The aging patriarch of an organized crime dynasty transfers control of his clandestine empire to his reluctant son." release_year 1972 genre "Drama" rating 9.2 votes 1563839 imdb_id tt0068646

    > HSET movie:11004 title "Heat" plot "A group of professional bank robbers start to feel the heat from police when they unknowingly leave a clue at their latest heist." release_year 1995 genre "Thriller" rating 8.2 votes 559490 imdb_id tt0113277

    > HSET "movie:11005" title "Star Wars: Episode VI - Return of the Jedi" genre "Action" votes 906260 rating 8.3 release_year 1983 plot "The Rebels dispatch to Endor to destroy the second Empire's Death Star." ibmdb_id "tt0086190"

    Now it is possible to get information from the hash using the movie ID. For example, if you want to get the title and rating, execute the following command:

    > HMGET movie:11002 title rating

    1) "Star Wars: Episode V - The Empire Strikes Back"
    2) "8.7"

    And you can increment the rating of this movie using:

    > HINCRBYFLOAT movie:11002 rating 0.1
    "8.8"

    But how do you get a movie or list of movies by year of release, rating or title?

    One option would be to read all the movies, check all the fields, and then return only the matching movies; needless to say, this is a really bad idea.

    Traditionally, this is where Redis developers create custom secondary indexes using SET/SORTED SET structures that point back to the movie hash. This needs some heavy design and implementation, as sketched below.
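
    A minimal sketch of such a hand-rolled index (the key name idx:by_release_year is illustrative, not from the tutorial): every write to a movie hash must also update a sorted set, and range queries go through that set:

    > ZADD idx:by_release_year 1980 movie:11002
    > ZADD idx:by_release_year 1972 movie:11003
    > ZRANGEBYSCORE idx:by_release_year 1970 1980

    Keeping such structures consistent with the hashes is exactly the bookkeeping that Redis Stack automates.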

    This is where Search and Query in Redis Stack can help, and why it was created.

    Search & Indexing

    Redis Stack greatly simplifies this by offering a simple and automatic way to create secondary indices on Redis Hashes (more data structures will eventually come).

    Secondary Index

    When using Redis Stack, if you want to query on a field, you must first index that field. Let's start by indexing the following fields for our movies:

    • Title
    • Release Year
    • Rating
    • Genre

    When creating an index you define:

    • which data you want to index: all hashes with a key starting with movie:
    • which fields in the hashes you want to index using a Schema definition.

    Warning: Do not index all fields

    Indexes take space in memory, and must be updated when the primary data is updated. So create the index carefully and keep the definition up to date with your needs.

    Create the Index

    Create the index with the following command:

    > FT.CREATE idx:movie ON hash PREFIX 1 "movie:" SCHEMA title TEXT SORTABLE release_year NUMERIC SORTABLE rating NUMERIC SORTABLE genre TAG SORTABLE

    Before running some queries let's look at the command in detail:

    • FT.CREATE : creates an index with the given spec. The index name will be used in all the key names so keep it short.
    • idx:movie : the name of the index
    • ON hash : the type of structure to be indexed.
    • PREFIX 1 "movie:" : the prefix of the keys that should be indexed. This is a list, so since we want to only index movie:* keys the number is 1. Suppose you want to index movies and tv_show that have the same fields, you can use: PREFIX 2 "movie:" "tv_show:"
    • SCHEMA ... : defines the schema - the fields to index and their types; as you can see in the command, we are using the TEXT, NUMERIC, TAG and SORTABLE parameters.

    You can find information about the FT.CREATE command in the documentation.

    You can look at the index information with the following command:

    > FT.INFO idx:movie
    - + \ No newline at end of file diff --git a/howtos/moviesdatabase/getting-started/index.html b/howtos/moviesdatabase/getting-started/index.html index de69f4c615..83bbb4672b 100644 --- a/howtos/moviesdatabase/getting-started/index.html +++ b/howtos/moviesdatabase/getting-started/index.html @@ -4,7 +4,7 @@ 1. Getting Started | The Home of Redis Developers - + @@ -13,7 +13,7 @@

    1. Getting Started


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    Redis Stack includes a real-time indexing and search engine. Search and Query in Redis provide a simple and fast way to index and query data using any field (secondary index), and do search and aggregation on an indexed dataset. In this tutorial you will learn how to use Search and Query which provides an indexing and full text search engine for Redis.

    • Install Redis Stack & Insert Data
    • Create Index
    • Query Data
    • Manage Indexes
    • Import Sample Dataset
    • Querying the Movie Dataset
    • Aggregation
    • Advanced Options
    • Sample Application

    Let's get started...

    - + \ No newline at end of file diff --git a/howtos/moviesdatabase/import/index.html b/howtos/moviesdatabase/import/index.html index 8f054f9d8d..e003747d48 100644 --- a/howtos/moviesdatabase/import/index.html +++ b/howtos/moviesdatabase/import/index.html @@ -4,7 +4,7 @@ 6. Import datasets | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Sample Dataset

    In the previous steps you used only a few movies; let's now import:

    • More movies to discover more queries.
    • Theaters to discover the geospatial capabilities.
    • Users to do some aggregations.

    Dataset Description

    Movies

    The file https://raw.githubusercontent.com/RediSearch/redisearch-getting-started/master/sample-app/redisearch-docker/dataset/import_movies.redis is a script that creates 922 Hashes.

    The movie hashes contain the following fields.

    • movie:id : The unique ID of the movie, internal to this database (used as the key of the hash)
    • title : The title of the movie.
    • plot : A summary of the movie.
    • genre : The genre of the movie, for now a movie will only have a single genre.
    • release_year : The year the movie was released as a numerical value.
    • rating : A numeric value representing the public's rating for this movie.
    • votes : Number of votes.
    • poster : Link to the movie poster.
    • imdb_id : id of the movie in the IMDB database.
    Sample Data: movie:343

    Field | Value
    title | Spider-Man
    plot | When bitten by a genetically modified spider a nerdy shy and awkward high school student gains spider-like abilities that he eventually must use to fight evil as a superhero after tragedy befalls his family.
    genre | Action
    release_year | 2002
    rating | 7.3
    votes | 662219
    poster | https://m.media-amazon.com/images/M/MV5BZDEyN2NhMjgtMjdhNi00MmNlLWE5YTgtZGE4MzNjMTRlMGEwXkEyXkFqcGdeQXVyNDUyOTg3Njg@._V1_SX300.jpg
    imdb_id | tt0145487

    Theaters

    The file https://raw.githubusercontent.com/RediSearch/redisearch-getting-started/master/sample-app/redisearch-docker/dataset/import_theaters.redis is a script that creates 117 Hashes (used for geospatial queries). This dataset is a list of New York theaters (live theaters, not movie theaters), but that is not critical for this project ;).

    The theater hashes contain the following fields.

    • theater:id : The unique ID of the theater, internal to this database (used as the key of the hash)
    • name : The name of the theater
    • address : The street address
    • city : The city, in this sample dataset all the theaters are in New York
    • zip : The zip code
    • phone : The phone number
    • url : The URL of the theater
    • location : Contains the longitude,latitude used to create the Geo-indexed field
    Sample Data: theater:20

    Field | Value
    name | Broadway Theatre
    address | 1681 Broadway
    city | New York
    zip | 10019
    phone | 212 944-3700
    url | http://www.shubertorganization.com/theatres/broadway.asp
    location | -73.98335054631019,40.763270202723625

    Users

    The file https://raw.githubusercontent.com/RediSearch/redisearch-getting-started/master/sample-app/redisearch-docker/dataset/import_users.redis is a script that creates 5996 Hashes.

    The user hashes contain the following fields.

    • user:id : The unique ID of the user.
    • first_name : The first name of the user.
    • last_name : The last name of the user.
    • email : The email of the user.
    • gender : The gender of the user (female/male).
    • country : The country name of the user.
    • country_code : The country code of the user.
    • city : The city of the user.
    • longitude : The longitude of the user.
    • latitude : The latitude of the user.
    • last_login : The last login time for the user, as an EPOCH timestamp.
    • ip_address : The IP address of the user.
    Sample Data: user:3233

    Field | Value
    first_name | Rosetta
    last_name | Olyff
    email | rolyff6g@163.com
    gender | female
    country | China
    country_code | CN
    city | Huangdao
    longitude | 120.04619
    latitude | 35.872664
    last_login | 1570386621
    ip_address | 218.47.90.79

    Importing the Movies, Theaters and Users

    Before importing the data, flush the database:

    > FLUSHALL

    The easiest way to import the files is to use redis-cli, with the following terminal commands:

    $ curl -s https://raw.githubusercontent.com/RediSearch/redisearch-getting-started/master/sample-app/redisearch-docker/dataset/import_movies.redis | redis-cli -h localhost -p 6379 --pipe

    $ curl -s https://raw.githubusercontent.com/RediSearch/redisearch-getting-started/master/sample-app/redisearch-docker/dataset/import_theaters.redis | redis-cli -h localhost -p 6379 --pipe


    $ curl -s https://raw.githubusercontent.com/RediSearch/redisearch-getting-started/master/sample-app/redisearch-docker/dataset/import_users.redis | redis-cli -h localhost -p 6379 --pipe

    Using Redis Insight or the redis-cli you can look at the dataset:

    > HMGET "movie:343" title release_year genre

    1) "Spider-Man"
    2) "2002"
    3) "Action"


    > HMGET "theater:20" name location
    1) "Broadway Theatre"
    2) "-73.98335054631019,40.763270202723625"



    > HMGET "user:343" first_name last_name last_login
    1) "Umeko"
    2) "Castagno"
    3) "1574769122"

    You can also use the DBSIZE command to see how many keys you have in your database.
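
    For example (the count below assumes a fresh database loaded with only the three datasets: 922 movies + 117 theaters + 5996 users):

    > DBSIZE

    (integer) 7035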


    Create Indexes

    Create the idx:movie index:

    > FT.CREATE idx:movie ON hash PREFIX 1 "movie:" SCHEMA title TEXT SORTABLE plot TEXT WEIGHT 0.5 release_year NUMERIC SORTABLE rating NUMERIC SORTABLE votes NUMERIC SORTABLE genre TAG SORTABLE

    "OK"

    The movies have now been indexed. You can run the FT.INFO "idx:movie" command and look at the num_docs returned value (it should be 922).

    Create the idx:theater index:

    This index will mostly be used to show the geospatial capabilities of search in Redis Stack.

    In the previous examples we have created indexes with 3 types:

    • Text
    • Numeric
    • Tag

    You will now discover a new type of field: Geo.

    The theater hashes contain a location field with the longitude and latitude, which will be used in the index as follows:

    > FT.CREATE idx:theater ON hash PREFIX 1 "theater:" SCHEMA name TEXT SORTABLE location GEO

    "OK"

    The theaters have been indexed. You can run the FT.INFO "idx:theater" command and look at the num_docs returned value (it should be 117).
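
    As a quick check of the new Geo field, you can search for theaters within a radius of a point; the coordinates below are the Broadway Theatre's, taken from the sample data:

    > FT.SEARCH idx:theater "@location:[-73.9833 40.7632 1 km]" RETURN 1 name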

    Create the idx:user index:

    > FT.CREATE idx:user ON hash PREFIX 1 "user:" SCHEMA gender TAG country TAG SORTABLE last_login NUMERIC SORTABLE location GEO

    "OK"
    - + \ No newline at end of file diff --git a/howtos/moviesdatabase/index.html b/howtos/moviesdatabase/index.html index 9f1bb631fa..3ec68a0605 100644 --- a/howtos/moviesdatabase/index.html +++ b/howtos/moviesdatabase/index.html @@ -4,7 +4,7 @@ How to list and search Movies Database using Redis Stack | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    How to list and search Movies Database using Redis Stack

    - + \ No newline at end of file diff --git a/howtos/moviesdatabase/install/index.html b/howtos/moviesdatabase/install/index.html index 54d1b5db6f..bde5e0d244 100644 --- a/howtos/moviesdatabase/install/index.html +++ b/howtos/moviesdatabase/install/index.html @@ -4,7 +4,7 @@ 2. Install Redis Stack | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    2. Install Redis Stack

    You have multiple ways to run Redis Stack:

    Let's use Docker for now.

    1.1 Open a terminal and run the following command:

    > docker run -it --rm --name redis-stack-latest \
    -p 6379:6379 \
    redis/redis-stack:latest
    note

    The container will automatically be removed when it exits (--rm parameter).

    You now have a Redis instance running with Redis Stack installed; let's discover the basics.
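
    As a quick sanity check (not part of the original steps), you can ping the instance from another terminal:

    > redis-cli -p 6379 PING
    PONG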


    - + \ No newline at end of file diff --git a/howtos/moviesdatabase/manage/index.html b/howtos/moviesdatabase/manage/index.html index 50c99a44a8..371037c1b5 100644 --- a/howtos/moviesdatabase/manage/index.html +++ b/howtos/moviesdatabase/manage/index.html @@ -4,7 +4,7 @@ 5. Manage Index | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    5. Manage Index

    Listing and inspecting the indexes

    The FT._LIST command provides a list of all indexes in your database:

    > FT._LIST
    1) "idx:movie"

    FT.INFO provides information about a specific index:

    > FT.INFO "idx:movie"

    1) "index_name"
    2) "idx:movie"
    ...
    5) "index_definition"
    ...
    7) "fields"
    ...
    9) "num_docs"
    10) "4"
    ...

    Updating your Index

    As you build your application and add more information to the database, you may need to add new fields to the index. The FT.ALTER command enables you to do this.

    > FT.ALTER idx:movie SCHEMA ADD plot TEXT WEIGHT 0.5
    "OK"

    The WEIGHT declares the importance of this field when calculating result accuracy. This is a multiplication factor (default is 1); so in this example the plot is less important than the title.
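
    To see the effect of the weight, you can ask the engine to return the computed scores; WITHSCORES is a standard FT.SEARCH option, and the exact values depend on your data:

    > FT.SEARCH idx:movie "heat" WITHSCORES RETURN 1 title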

    Let's do a query with the new indexed field:

    > FT.SEARCH idx:movie "empire @genre:{Action}" RETURN 2 title plot

    Dropping the Index

    You can drop an index using the FT.DROPINDEX command.

    > FT.DROPINDEX idx:movie

    "OK"

    Dropping an index does not impact the indexed hashes; this means that the movies are still inside the database.

    > SCAN 0 MATCH movie:*

    1) "0"
    2) 1) "movie:11002"
    2) "movie:11004"
    3) "movie:11003"
    4) "movie:11005"
    note

    You can delete the indexed document/hashes by adding the DD parameter.
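
    For example, the following would drop the index and also delete the indexed movie hashes, so only run it if you really want to remove the data:

    > FT.DROPINDEX idx:movie DD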

    - + \ No newline at end of file diff --git a/howtos/moviesdatabase/query/index.html b/howtos/moviesdatabase/query/index.html index f7353d5e57..72ed2f7ef7 100644 --- a/howtos/moviesdatabase/query/index.html +++ b/howtos/moviesdatabase/query/index.html @@ -4,7 +4,7 @@ 4. Query Data | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    4. Query Data

    The database now contains a few movies and an index, so it is possible to execute some queries.

    Queries

    Example : All the movies that contains the string "war"

    > FT.SEARCH idx:movie "war"

    1) (integer) 2
    2) "movie:11005"
    3) 1) "title"
       2) "Star Wars: Episode VI - Return of the Jedi"
       ...
       14) "tt0086190"
    4) "movie:11002"
    5) 1) "title"
       2) "Star Wars: Episode V - The Empire Strikes Back"
       ...
       13) "imdb_id"
       14) "tt0080684"

    The FT.SEARCH command returns a list of results starting with the number of results, then the list of elements (keys & fields).

    As you can see the movie Star Wars: Episode V - The Empire Strikes Back is found, even though you used only the word “war” to match “Wars” in the title. This is because the title has been indexed as text, so the field is tokenized and stemmed.

    Later when looking at the query syntax in more detail you will learn more about the search capabilities.

    It is also possible to limit the list of fields returned by the query using the RETURN parameter. Let's run the same query and return only the title and release_year:

    > FT.SEARCH idx:movie "war" RETURN 2 title release_year

    1) (integer) 2
    2) "movie:11005"
    3) 1) "title"
       2) "Star Wars: Episode VI - Return of the Jedi"
       3) "release_year"
       4) "1983"
    4) "movie:11002"
    5) 1) "title"
       2) "Star Wars: Episode V - The Empire Strikes Back"
       3) "release_year"
       4) "1980"

    This query does not specify any "field" and still returns some movies; this is because Search in Redis Stack searches all TEXT fields by default. In the current index only the title is present as a TEXT field. You will see later how to update an index to add more fields to it.

    If you need to perform a query on a specific field you can specify it using the @field: syntax, for example:

    > FT.SEARCH idx:movie "@title:war" RETURN 2 title release_year

    Example : All the movies that contains the string "war but NOT the jedi one"

    Adding the string -jedi (minus) will ask the query engine not to return values that contain jedi.

    > FT.SEARCH idx:movie "war -jedi" RETURN 2 title release_year

    1) (integer) 1
    2) "movie:11002"
    3) 1) "title"
       2) "Star Wars: Episode V - The Empire Strikes Back"
       3) "release_year"
       4) "1980"

    Example : All the movies that contains the string "gdfather using fuzzy search"

    As you can see, the word "godfather" contains a spelling error. It can however be matched using fuzzy matching. Fuzzy matches are performed based on Levenshtein distance (LD).

    > FT.SEARCH idx:movie " %gdfather% " RETURN 2 title release_year

    1) (integer) 1
    2) "movie:11003"
    3) 1) "title"
       2) "The Godfather"
       3) "release_year"
       4) "1972"

    Example : All Thriller movies"

    The genre field is indexed as a TAG, which allows exact match queries.

    The syntax to query a TAG field is @field_name:{value}

    > FT.SEARCH idx:movie "@genre:{Thriller}" RETURN 2 title release_year

    1) (integer) 1
    2) "movie:11004"
    3) 1) "title"
       2) "Heat"
       3) "release_year"
       4) "1995"


    Example : All Thriller or Action movies"

    > FT.SEARCH idx:movie "@genre:{Thriller|Action}" RETURN 2 title release_year

    1) (integer) 3
    2) "movie:11004"
    3) 1) "title"
       2) "Heat"
       3) "release_year"
       4) "1995"
    4) "movie:11005"
    5) 1) "title"
       2) "Star Wars: Episode VI - Return of the Jedi"
       3) "release_year"
       4) "1983"
    6) "movie:11002"
    7) 1) "title"
       2) "Star Wars: Episode V - The Empire Strikes Back"
       3) "release_year"
       4) "1980"

    You can find more information about the Tag filters in the documentation.


    Example : All Thriller or Action movies that do not have "Jedi" in the title

    > FT.SEARCH idx:movie "@genre:{Thriller|Action} @title:-jedi" RETURN 2 title release_year

    1) (integer) 2
    2) "movie:11004"
    3) 1) "title"
       2) "Heat"
       3) "release_year"
       4) "1995"
    4) "movie:11002"
    5) 1) "title"
       2) "Star Wars: Episode V - The Empire Strikes Back"
       3) "release_year"
       4) "1980"

    Example : All the movies released between 1970 and 1980 (inclusive)

    The FT.SEARCH syntax has two ways to query numeric fields:

    • using the FILTER parameter

    or

    • using the @field in the query string.
    > FT.SEARCH idx:movie * FILTER release_year 1970 1980 RETURN 2 title release_year
    > FT.SEARCH idx:movie "@release_year:[1970 1980]" RETURN 2 title release_year

    1) (integer) 2
    2) "movie:11003"
    3) 1) "title"
       2) "The Godfather"
       3) "release_year"
       4) "1972"
    4) "movie:11002"
    5) 1) "title"
       2) "Star Wars: Episode V - The Empire Strikes Back"
       3) "release_year"
       4) "1980"

    To exclude a value, prepend it with ( in the FILTER or query string; for example, to exclude 1980:

    > FT.SEARCH idx:movie "@release_year:[1970 (1980]" RETURN 2 title release_year

    Insert, Update, Delete and Expire Documents

    As part of this tutorial you have:

    1. Created a few movies as Redis hashes (that we call documents) with the key pattern movie:*
    2. Created an index using the FT.CREATE command
    3. Queried the data using FT.SEARCH

    When creating the index, the idx:movie ON hash PREFIX 1 "movie:" parameters ask the indexing engine to look at all existing keys and index them.

    New information that matches this pattern/type will also be indexed.

    Let's count the number of movies, add a new one, and count again:

    > FT.SEARCH idx:movie "*" LIMIT 0 0

    1) (integer) 4


    > HSET movie:11033 title "Tomorrow Never Dies" plot "James Bond sets out to stop a media mogul's plan to induce war between China and the U.K in order to obtain exclusive global media coverage." release_year 1997 genre "Action" rating 6.5 votes 177732 imdb_id tt0120347

    > FT.SEARCH idx:movie "*" LIMIT 0 0

    1) (integer) 5

    The new movie has been indexed. You can also search on any of the indexed fields:

    > FT.SEARCH idx:movie "never" RETURN 2 title release_year

    1) (integer) 1
    2) "movie:11033"
    3) 1) "title"
    2) "Tomorrow Never Dies"
    3) "release_year"
    4) "1997"

    Now update one of the fields, and search for 007:

    > HSET movie:11033 title "Tomorrow Never Dies - 007"


    > FT.SEARCH idx:movie "007" RETURN 2 title release_year

    1) (integer) 1
    2) "movie:11033"
    3) 1) "title"
    2) "Tomorrow Never Dies - 007"
    3) "release_year"
    4) "1997"

    When you delete the hash, the index is also updated, and the same happens when the key expires (TTL - Time To Live).

    For example, set the James Bond movie to expire in 20 seconds time:

    > EXPIRE "movie:11033" 20

    You can run the following query, and you will see that the document expires after 20 seconds; the search query then returns no results, showing that the index has been updated.

    > FT.SEARCH idx:movie "007" RETURN 2 title release_year

    1) (integer) 0

    tip

    When you are using Redis as your primary database you are not necessarily using TTLs to delete records. However, if the data you are storing and indexing is transient (e.g. a caching layer on top of another datastore or web service, user session content, etc.), this is often qualified as an "Ephemeral Search" use case: lightweight, fast, and with expiration.


    More

    There are many additional features regarding indexing and searching that you can find in the documentation.

    Let's see how to inspect, modify and drop an index.

    - + \ No newline at end of file diff --git a/howtos/moviesdatabase/querymovies/index.html b/howtos/moviesdatabase/querymovies/index.html index febf64dc0a..98c0761b38 100644 --- a/howtos/moviesdatabase/querymovies/index.html +++ b/howtos/moviesdatabase/querymovies/index.html @@ -4,7 +4,7 @@ 7. Query Movies | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Querying the Movie Dataset

    As described earlier in the tutorial, one of the goals of search and query in Redis Stack is to provide rich querying capabilities such as:

    • simple and complex conditions
    • sorting
    • pagination
    • counting

    Conditions

    The best way to start working with Redis Stack query capabilities is to look at the various condition options.

    > FT.SEARCH "idx:movie" "heat" RETURN 2 title plot

    1) (integer) 4
    2) "movie:1141"
    3) 1) "title"
       2) "Heat"
       3) "plot"
       4) "A group of professional bank robbers start to feel the heat from police when they unknowingly leave a clue at their latest heist."
    4) "movie:818"
    5) 1) "title"
       2) "California Heat"
       3) "plot"
       4) "A lifeguard bets he can be true to just one woman."
    6) "movie:736"
    7) 1) "title"
       2) "Chicago Justice"
       3) "plot"
       4) "The State's Attorney's dedicated team of prosecutors and investigators navigates heated city politics and controversy head-on,while fearlessly pursuing justice."
    8) "movie:1109"
    9) 1) "title"
       2) "Love & Hip Hop: Miami"
       3) "plot"
       4) "'Love and Hip Hop Miami' turns up the heat and doesn't hold back in making the 305 the place to be. Multi-platinum selling hip-hop legend Trick Daddy is back in the studio collaborating ..."

    The first line contains the number of documents (4) that match the query condition, then the list of movies.

    This query is a "fieldless" condition, this means that the query engine has:

    • searched in all the TEXT fields of the index(title and plot)
    • for the word heat and related words, this is why the movie:736 is returned since it has the word heated in the plot (stemming)
    • returned the result sorted by score, remember that the title has a weight of 1.0, and the plot a weight of 0.5. So when the word or related words are found in the title the score is larger.

    To restrict the search to the title, set the criteria on that field using the @title notation.

    > FT.SEARCH "idx:movie" "@title:heat" RETURN 2 title plot
    1) (integer) 2
    2) "movie:1141"
    3) 1) "title"
       2) "Heat"
       3) "plot"
       4) "A group of professional bank robbers start to feel the heat from police when they unknowingly leave a clue at their latest heist."
    4) "movie:818"
    5) 1) "title"
       2) "California Heat"
       3) "plot"
       4) "A lifeguard bets he can be true to just one woman."

    So only 2 movies are returned.

    Find all the movies where the title contains 'heat' and does NOT contain 'california'

    For this you add parentheses around the field condition and add the - sign to 'california'.

    > FT.SEARCH "idx:movie" "@title:(heat -california)" RETURN 2 title plot
    1) (integer) 1
    2) "movie:1141"
    3) 1) "title"
       2) "Heat"
       3) "plot"
       4) "A group of professional bank robbers start to feel the heat from police when they unknowingly leave a clue at their latest heist."

    Only one movie is returned.

    If you do not put the ( .. ), the -california condition will be applied to all the text fields.

    You can test this with the following queries:

    > FT.SEARCH "idx:movie" "@title:(heat -woman)" RETURN 2 title plot
    > FT.SEARCH "idx:movie" "@title:heat -woman" RETURN 2 title plot

    As you can see, the first query applies the -woman exclusion only to the title and returns two movies ("Heat" and "California Heat"), whereas the second query eliminates "California Heat" from the list since its plot contains the word woman.

    - + \ No newline at end of file diff --git a/howtos/moviesdatabase/sampleapp/index.html b/howtos/moviesdatabase/sampleapp/index.html index 69c7e6fac8..0deef8cfbf 100644 --- a/howtos/moviesdatabase/sampleapp/index.html +++ b/howtos/moviesdatabase/sampleapp/index.html @@ -4,7 +4,7 @@ 10. Sample Application | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Application Development

    It is now time to see how to use search with Redis Stack in your application.

    Run the Sample Application

    The application and all the services, including Redis Stack, are available as a Docker Compose application.

    If you have not already downloaded the project, clone it:

    > git clone https://github.com/RediSearch/redisearch-getting-started.git

    > cd redisearch-getting-started

    To run the application:

    > cd sample-app

    > docker-compose up --force-recreate --build

    This Docker Compose will start:

    1. A Redis Stack instance on port 6380, which imports all movies and actors and creates the indexes
    2. The Java, Node and Python REST services, available on ports 8085, 8086 and 8087
    3. The frontend on port 8084

    Once started you can access the application and its services using the following URLs:

    • http://localhost:8084
    • http://localhost:8085/api/1.0/movies/search?q=star&offset=0&limit=10
    • http://localhost:8086/api/1.0/movies/search?q=star&offset=0&limit=10
    • http://localhost:8087/api/1.0/movies/search?q=star&offset=0&limit=10

    Stop and Delete Everything

    Run the following command to delete the containers & images:

    > docker-compose down -v --rmi local --remove-orphans
    - + \ No newline at end of file diff --git a/howtos/nlp/index.html b/howtos/nlp/index.html index f28acf10ea..b2280b7898 100644 --- a/howtos/nlp/index.html +++ b/howtos/nlp/index.html @@ -4,7 +4,7 @@ Building a Pipeline for Natural Language Processing using RedisGears | The Home of Redis Developers - + @@ -16,7 +16,7 @@ One need to change a few default parameters for rgcluster to accommodate the size of PyTorch and spacy libraries (each over 1GB zipped), gist with settings.

    Step 5. Create or activate Python virtual environment

     cd ./the-pattern-platform/

    Step 6. Create new environment

    You can create it via

     conda create -n pattern_env python=3.8

    or

    Alternatively, activate an existing environment:

     source ~/venv_cord19/bin/activate #or create new venv
    pip install -r requirements.txt

    Step 7. Run pipeline

     bash cluster_pipeline.sh

    Step 8. Validating the functionality of the NLP pipeline

    Wait for a bit and then check:

    Verify that RedisGraph is populated:

     redis-cli -p 9001 -h 127.0.0.1 GRAPH.QUERY cord19medical "MATCH (n:entity) RETURN count(n) as entity_count"
    redis-cli -p 9001 -h 127.0.0.1 GRAPH.QUERY cord19medical "MATCH (e:entity)-[r]->(t:entity) RETURN count(r) as edge_count"

    Check that the API responds:

     curl -i -H "Content-Type: application/json" -X POST -d '{"search":"How does temperature and humidity affect the transmission of 2019-nCoV"}' http://localhost:8080/gsearch

    Walkthrough

    While RedisGears allows you to deploy and run machine learning libraries like spacy and BERT transformers, the solution above uses a simpler approach:

     gb = GB('KeysReader')                # read keys from the keyspace
    gb.filter(filter_language)           # keep only English documents
    gb.flatmap(parse_paragraphs)         # split articles into paragraphs
    gb.map(spellcheck_sentences)         # fix spelling before matching
    gb.foreach(save_sentences)           # persist sentences back to Redis
    gb.count()                           # avoid returning the whole dataset to the caller
    gb.register('paragraphs:*', keyTypes=['string', 'hash'], mode="async_local")

    This is the overall pipeline: those 7 lines allow you to run logic on a distributed cluster or on a single machine using all available CPUs - no changes are required until you need to scale over more than 1000 nodes. I use KeysReader registered for the namespace paragraphs:* for all strings or hashes. The pipeline needs to run in async mode. For data scientists, I would recommend using gb.run first to make sure the Gears function works - it will run in batch mode - and then changing it to register, to capture new data. By default, functions return their output, hence the need for count(): it prevents fetching the whole dataset back to the command-issuing machine (90 GB for Cord19).

    Overall, pre-processing is straightforward - the full code is here.

    Things to keep in mind:

    1. A node process can only save locally - we don't move data; anything you want to save should carry a hashtag, for example to add to the set of processed docs:
     execute('SADD','processed_docs_{%s}' % hashtag(),article_id)
    2. Loading external libraries into the computational thread: for example, symspell requires additional dictionaries and needs two steps to load:
     """
    load symspell and relevant dictionaries
    """
    sym_spell=None

    def load_symspell():
    import pkg_resources
    from symspellpy import SymSpell, Verbosity
    sym_spell = SymSpell(max_dictionary_edit_distance=1, prefix_length=7)
    dictionary_path = pkg_resources.resource_filename(
    "symspellpy", "frequency_dictionary_en_82_765.txt")
    bigram_path = pkg_resources.resource_filename(
    "symspellpy", "frequency_bigramdictionary_en_243_342.txt")
    # term_index is the column of the term and count_index is the
    # column of the term frequency
    sym_spell.load_dictionary(dictionary_path, term_index=0, count_index=1)
    sym_spell.load_bigram_dictionary(bigram_path, term_index=0, count_index=2)
    return sym_spell
    3. Scispacy is a great library and data science tool, but after a few iterations with deploying it I ended up reading the data model documentation for the UMLS Metathesaurus and decided to build an Aho-Corasick automaton directly from the UMLS data (MRXW_ENG.RRF contains all term forms for English mapped to CUIs). Aho-Corasick allowed me to match incoming sentences into pairs of nodes (concepts from the medical dictionary) and present sentences as edges in a graph; the Gears-related code is simple:
     bg = GearsBuilder('KeysReader')
     bg.foreach(process_item)   # match each sentence against the Aho-Corasick automaton
     bg.count()
     bg.register('sentence:*', mode="async_local", onRegistered=OnRegisteredAutomata)

    OnRegisteredAutomata performs similarly to the symspell example above, except that it downloads a pre-built Aho-Corasick automaton (30 MB). Aho-Corasick is a very fast matcher that can process >900 MB of text per second even on a commodity laptop; the RedisGears cluster distributes data, the ML model and the matching smoothly across available CPU and memory. The full matcher code is available.

    The matcher outputs nodes and edges, which are candidates for another RedisGears pattern, rgsync, where you write fast into Redis and RedisGears replicates the data into slower storage using Redis Streams. Instead, this demo uses streams and handcrafts the population of the RedisGraph database with nodes and edges, calculating the rank of each; that will be the focus of the next blog post.

    Call to action

    We took OCR scans in JSON format and turned them into a knowledge graph, demonstrating how you can apply traditional Semantic Network/OWL/Metathesaurus techniques based on the Unified Medical Language System. The Redis ecosystem offers a lot to the data science community, can take place at the core of Kaggle notebooks and ML frameworks, and makes the deployment and distribution of data more enjoyable. The success of our industry depends on how our tools work together, regardless of whether they are engineering, data science, machine learning, organisational or architectural.

    With the collaboration of RedisLabs and the community, the full pipeline code is available via https://github.com/applied-knowledge-systems/the-pattern-platform. If you want to try it locally, you can find a Docker launch script in the root of the repository along with a short quickstart guide. PRs and suggestions are welcome. The overall goal of the project is to allow others to build more interesting pipelines on top of it.

    References

    - + \ No newline at end of file diff --git a/howtos/popupstore/index.html b/howtos/popupstore/index.html index 0e7d345f73..5ade866402 100644 --- a/howtos/popupstore/index.html +++ b/howtos/popupstore/index.html @@ -4,7 +4,7 @@ Building a Popup Store application using Redis | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Building a Popup Store application using Redis


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    Pop-up stores are becoming a popular channel for retailers to create a new revenue stream, generate buzz with customers, test product concepts, or unload excess inventory. Since the idea is to spin up the store quickly and then close it shortly thereafter, it doesn’t make sense to spend a lot of time on development. With the right Redis modules, you can create a robust customer experience without a lot of development effort.

    This pop-up store demo illustrates a company that sells a single product and has 10,000 units available for purchase. Each customer can purchase one unit and the sale lasts only 10 minutes, so order processing must be instantaneous. The demo shows how to visualize a data pipeline in real time using Redis Streams, Redis Time Series, RedisGears and the Redis Datasource for Grafana.

    Pop-up

    Step 1. Cloning the repository

    git clone https://github.com/redis-developer/redis-pop-up-store/

    Step 2. Running the application

    docker-compose up -d

    Step 3. Accessing Grafana dashboard

    Open http://IPAddress:3000 to access the Grafana dashboard.

    Grafana

    Grafana queries the streams and Time Series keys every 5 seconds to display samples using the Grafana Redis Datasource. This Grafana dashboard displays:

    • Product Available: the value of product key, which decreases as orders complete
    • Customers Ordering, Orders Processing, and Orders Completed: the length of queue:customers, queue:orders, and queue:complete streams
    • Customers Overflow: the difference between customer-submitted orders and orders completed
    • Customers Ordering: orders created in 5 seconds
    • Orders In Queue: orders waiting to be processed
    • Completed Flow: orders completed in 5 seconds

    How it works

    Diagram

    • Node.js script adds random data to Customers and Orders streams
    • RedisGears uses a StreamReader to watch all queue:* keys and add Time Series samples, as shown below
    # Add Time-Series
    def tsAdd(x):
        xlen = execute('XLEN', x['key'])
        execute('TS.ADD', 'ts:len:' + x['key'], '*', xlen)
        execute('TS.ADD', 'ts:enqueue:' + x['key'], '*', x['value'])


    # Stream Reader for any Queue
    gb = GearsBuilder('StreamReader')
    gb.countby(lambda x: x['key']).map(tsAdd)
    gb.register(prefix='queue:*', duration=5000, batch=10000, trimStream=False)
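
    To inspect these samples outside Grafana, you could query one of the Time Series directly; the key name below follows the script's ts:len: prefix and is only an example:

    > TS.RANGE ts:len:queue:orders - +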
    • Another RedisGears script completes orders by:
      • adding data to the queue:complete stream
      • deleting the client's order
      • decreasing the product amount
      • trimming the Orders queue
    # Complete order
    def complete(x):
        execute('XADD', 'queue:complete', '*', 'order', x['id'],
                'customer', x['value']['customer'])
        execute('XDEL', 'queue:customers', x['value']['customer'])
        execute('DECR', 'product')


    # Stream Reader for Orders queue
    gb = GearsBuilder('StreamReader')
    gb.map(complete)
    gb.register(prefix='queue:orders', batch=3, trimStream=True)

    Additional Resources

    - + \ No newline at end of file diff --git a/howtos/quick-start/cheat-sheet/index.html b/howtos/quick-start/cheat-sheet/index.html index fe2cf25de0..153fe04331 100644 --- a/howtos/quick-start/cheat-sheet/index.html +++ b/howtos/quick-start/cheat-sheet/index.html @@ -4,17 +4,17 @@ Redis Commands Cheat sheet | The Home of Redis Developers - +
    -

    Redis Commands Cheat sheet


    Profile picture for Prasan Kumar
    Author:
    Prasan Kumar, Technical Solutions Developer at Redis
    Profile picture for Will Johnston
    Author:
    Will Johnston, Developer Growth Manager at Redis

    Connect

    # Syntax
    redis-cli -u redis://host:port
    redis-cli -u redis://username:password@host:port

    # Examples
    redis-cli
    redis-cli -u redis://localhost:6379
    redis-cli -u redis://myuser:mypassword@localhost:6379

    # If you run Redis through Docker
    docker exec -it <container-id-or-name> redis-cli

    note

    To set up Redis either locally or in the cloud, refer to the tutorial.

    Strings/Numbers

    Command | Syntax | Example | Output
    SET | SET key value | SET myKey "Hello" | "OK"
    Description: Set key to hold the string value. If key already holds a value, it is overwritten, regardless of its type. Time Complexity: O(1)
    GET | GET key | GET myKey | "Hello"
    Description: Get the string value of key. If the key does not exist the special value nil is returned. Time Complexity: O(1)
    MGET | MGET key [key ...] | MGET myKey nonExistentKey | 1) "Hello" 2) (nil)
    Description: Returns the values of all specified keys. For every key that does not hold a string value or does not exist, the special value nil is returned. Time Complexity: O(N)
    INCR | INCR key | INCR myCounter | (integer) 1
    Description: Increments the number stored at key by one. If the key does not exist, it is set to 0 before performing the operation. Time Complexity: O(1)

    Generic

    Command | Syntax | Example | Output
    KEYS | KEYS pattern | KEYS my* | 1) "myKey" 2) "myCounter"
    Description: Returns all keys matching pattern. Time Complexity: O(N)
    EXISTS | EXISTS key [key ...] | EXISTS myKey | (integer) 1
    Description: Checks if one or more keys exist. Time Complexity: O(N)
    EXPIRE | EXPIRE key seconds | EXPIRE myKey 120 | (integer) 1
    Description: Set a timeout on a key. After the timeout has expired, the key will automatically be deleted. Time Complexity: O(1)
    TTL | TTL key | TTL myKey | (integer) 113
    Description: Returns the remaining time to live of a key that has a timeout. Time Complexity: O(1)
    PERSIST | PERSIST key | PERSIST myKey | (integer) 1
    Description: Removes the expiration from a key. Time Complexity: O(1)
    SCAN | SCAN cursor [MATCH pattern] [COUNT count] | SCAN 0 MATCH my* COUNT 2 | 1) "3" 2) 1) "myCounter" 2) "myKey"
    Description: Iterates the set of keys in the currently selected Redis database. Time Complexity: O(1) for every call. O(N) for a complete iteration.
    DEL | DEL key [key ...] | DEL myKey | (integer) 1
    Description: Removes the specified keys. Time Complexity: O(N)
    INFO | INFO [section] | INFO server / INFO keyspace | # Server redis_version:6.2.5 redis_git_sha1:00000000 redis_build_id:9893b2a-dirty redis_mode:standalone os:Linux 5.4.72-microsoft-standard-WSL2 x86_64 arch_bits:64 ... # Keyspace db0:keys=2,expires=0,avg_ttl=0
    Description: Returns information and statistics about the server, with different sections like server, clients, memory, persistence, stats, replication, cpu, commandstats, latencystats, sentinel, cluster, modules, keyspace and errorstats. Time Complexity: O(1)
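
    A quick session tying the expiration commands together (the TTL value depends on how much time has elapsed; -1 means no expiration is set):

    > SET myKey "Hello"
    "OK"
    > EXPIRE myKey 120
    (integer) 1
    > TTL myKey
    (integer) 113
    > PERSIST myKey
    (integer) 1
    > TTL myKey
    (integer) -1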

    Hashes

    Command | Syntax | Example | Output
    HSET | HSET key field value [field value ...] | HSET h_employee_profile:101 name "Nicol" age 33 | (integer) 2
    Description: Sets the specified fields to their respective values in the hash stored at key. Time Complexity: O(N)
    HGET | HGET key field | HGET h_employee_profile:101 name | "Nicol"
    Description: Returns the value associated with field in the hash stored at key. Time Complexity: O(1)
    HGETALL | HGETALL key | HGETALL h_employee_profile:101 | 1) "name" 2) "Nicol" 3) "age" 4) "33"
    Description: Returns all fields and values of the hash stored at key. Time Complexity: O(N)
    HMGET | HMGET key field1 [field2] | HMGET h_employee_profile:101 name age | 1) "Nicol" 2) "33"
    Description: Returns the values associated with the specified fields in the hash stored at key. Time Complexity: O(N)

    Sets

    Command | Syntax | Example | Output
    SADD | SADD key member [member ...] | SADD mySet "Hello" | (integer) 1
    Description: Adds the specified members to the set stored at key. Time Complexity: O(N)
    SMEMBERS | SMEMBERS key | SMEMBERS mySet | 1) "Hello"
    Description: Returns all the members of the set value stored at key. Time Complexity: O(N)
    SCARD | SCARD key | SCARD mySet | (integer) 1
    Description: Returns the set cardinality (number of elements) of the set stored at key. Time Complexity: O(1)
    SISMEMBER | SISMEMBER key member | SISMEMBER mySet "Hello" | (integer) 1
    Description: Returns if member is a member of the set stored at key. Time Complexity: O(1)
    SDIFF | SDIFF key1 [key2] | SDIFF mySet myOtherSet | 1) "Hello"
    Description: Returns the members of the set resulting from the difference between the first set and all the successive sets. Time Complexity: O(N)
    SDIFFSTORE | SDIFFSTORE destination key1 [key2] | SDIFFSTORE myNewSet mySet myOtherSet | (integer) 1
    Description: This command is equal to SDIFF, but instead of returning the resulting set, it is stored in destination. Time Complexity: O(N)
    SREM | SREM key member [member ...] | SREM mySet "Hello" | (integer) 1
    Description: Removes the specified members from the set stored at key.

    Sorted sets

    Command | Syntax | Example | Output
    ZADD | ZADD key score member [score member ...] | ZADD myZSet 1 "one" 2 "two" | (integer) 2
    Description: Adds all the specified members with the specified scores to the sorted set stored at key. Time Complexity: O(log(N))
    ZRANGE | ZRANGE key start stop [WITHSCORES] | ZRANGE myZSet 0 -1 | 1) "one" 2) "two"
    Description: Returns the specified range of elements in the sorted set stored at key. Time Complexity: O(log(N)+M) where M is the number of elements returned

    Lists

    Command | Syntax | Example | Output
    LPUSH | LPUSH key value [value ...] | LPUSH myList "World" | (integer) 1
    Description: Inserts the specified values at the head of the list stored at key. Time Complexity: O(N)
    RPUSH | RPUSH key value [value ...] | RPUSH myList "Hello" | (integer) 2
    Description: Inserts the specified values at the tail of the list stored at key. Time Complexity: O(N)
    LRANGE | LRANGE key start stop | LRANGE myList 0 -1 | 1) "World" 2) "Hello"
    Description: Returns the specified elements of the list stored at key. Time Complexity: O(S+N) where S is the distance of start and N is the number of elements in the specified range.
    LLEN | LLEN key | LLEN myList | (integer) 2
    Description: Returns the length of the list stored at key. Time Complexity: O(1)
    LPOP | LPOP key [count] | LPOP myList | "World"
    Description: Removes and returns the first element of the list stored at key. Time Complexity: O(N)
    RPOP | RPOP key [count] | RPOP myList | "Hello"
    Description: Removes and returns the last element of the list stored at key. Time Complexity: O(N)

    Streams

    CommandSyntaxExampleOutput
    XADDXADD key field value [field value ...]

    XADD myStream * sensorId "1234" temperature "19.8"
    1518951480106-0
    Description: Appends the specified stream entry to the stream at the specified key. Time Complexity: O(1) when adding a new entry.
    XREADXREAD [COUNT count] [BLOCK milliseconds] STREAMS key [key ...] ID [ID ...]

    XREAD COUNT 2 STREAMS myStream 0
    1) 1) "myStream" 2) 1) 1) "1518951480106-0" 2) 1) "sensorId" 2) "1234" 3) "temperature" 4) "19.8"
    Description: Read data from one or multiple streams, only returning entries with an ID greater than the last received ID reported by the caller.
    XRANGEXRANGE key start end [COUNT count]

    XRANGE myStream 1518951480106-0 1518951480106-0
    1) 1) 1) "1518951480106-0" 2) 1) "sensorId" 2) "1234" 3) "temperature" 4) "19.8"
    Description: Returns the entries matching a range of IDs in a stream. Time Complexity: O(N) with N being the number of elements being returned. If N is constant (e.g. always asking for the first 10 elements with COUNT), you can consider it O(1).
    XLENXLEN key

    XLEN myStream
    (integer) 1
    Description: Returns the number of entries of a stream. Time Complexity: O(1)
    XDELXDEL key ID [ID ...]

    XDEL myStream 1518951480106-0
    (integer) 1
    Description: Removes the specified entries from a stream. Time Complexity: O(1) for each single item to delete in the stream
    XTRIMXTRIM key MAXLEN [~] count

    XTRIM myStream MAXLEN 0
    (integer) 0
    Description: Trims the stream to a different length. Time Complexity: O(N), with N being the number of evicted entries. Constant times are very small however, since entries are organized in macro nodes containing multiple entries that can be released with a single deallocation.

     


    Redis stack commands

    Redis stack extends the core features +

    Redis Commands Cheat sheet


Authors: Prasan Kumar, Technical Solutions Developer at Redis; Will Johnston, Developer Growth Manager at Redis

    Connect

    # Syntax
    redis-cli -u redis://host:port
    redis-cli -u redis://username:password@host:port

    # Examples
    redis-cli
    redis-cli -u redis://localhost:6379
    redis-cli -u redis://myuser:mypassword@localhost:6379

    # If you run Redis through Docker
    docker exec -it <container-id-or-name> redis-cli
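The same connection can be made from application code. A minimal sketch using the redis-py client (an assumption; any Redis client works, and host, port, and credentials are placeholders for your deployment):

    import redis

    # Connect to a local Redis server; adjust host, port, and credentials as needed.
    r = redis.Redis(host="localhost", port=6379, decode_responses=True)

    # PING verifies that the connection is alive.
    print(r.ping())  # True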

Note: To set up Redis either locally or in the cloud, refer to the tutorial.

    Strings/Numbers

SET
Syntax: SET key value
Example: SET myKey "Hello"
Output: "OK"
Description: Set key to hold the string value. If key already holds a value, it is overwritten, regardless of its type. Time Complexity: O(1)

GET
Syntax: GET key
Example: GET myKey
Output: "Hello"
Description: Get the string value of key. If the key does not exist, the special value nil is returned. Time Complexity: O(1)

MGET
Syntax: MGET key [key ...]
Example: MGET myKey nonExistentKey
Output: 1) "Hello" 2) (nil)
Description: Returns the values of all specified keys. For every key that does not hold a string value or does not exist, the special value nil is returned. Time Complexity: O(N)

INCR
Syntax: INCR key
Example: INCR myCounter
Output: (integer) 1
Description: Increments the number stored at key by one. If the key does not exist, it is set to 0 before performing the operation. Time Complexity: O(1)
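For reference, the same string commands driven from redis-py (a sketch, assuming the local connection shown in the Connect section):

    import redis

    r = redis.Redis(decode_responses=True)

    r.set("myKey", "Hello")            # SET myKey "Hello"
    print(r.get("myKey"))              # GET myKey      -> Hello
    print(r.mget("myKey", "missing"))  # MGET           -> ['Hello', None]
    print(r.incr("myCounter"))         # INCR myCounter -> 1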

    Generic

KEYS
Syntax: KEYS pattern
Example: KEYS my*
Output: 1) "myKey" 2) "myCounter"
Description: Returns all keys matching pattern. Time Complexity: O(N)

EXISTS
Syntax: EXISTS key [key ...]
Example: EXISTS myKey
Output: (integer) 1
Description: Checks if one or more keys exist. Time Complexity: O(N)

EXPIRE
Syntax: EXPIRE key seconds
Example: EXPIRE myKey 120
Output: (integer) 1
Description: Sets a timeout on a key. After the timeout has expired, the key is automatically deleted. Time Complexity: O(1)

TTL
Syntax: TTL key
Example: TTL myKey
Output: (integer) 113
Description: Returns the remaining time to live of a key that has a timeout. Time Complexity: O(1)

PERSIST
Syntax: PERSIST key
Example: PERSIST myKey
Output: (integer) 1
Description: Removes the expiration from a key. Time Complexity: O(1)

SCAN
Syntax: SCAN cursor [MATCH pattern] [COUNT count]
Example: SCAN 0 MATCH my* COUNT 2
Output: 1) "3" 2) 1) "myCounter" 2) "myKey"
Description: Iterates the set of keys in the currently selected Redis database. Time Complexity: O(1) for every call; O(N) for a complete iteration.

DEL
Syntax: DEL key [key ...]
Example: DEL myKey
Output: (integer) 1
Description: Removes the specified keys. Time Complexity: O(N)

INFO
Syntax: INFO [section]
Example: INFO server, INFO keyspace
Output (abridged):
# Server
redis_version:6.2.5
redis_git_sha1:00000000
redis_build_id:9893b2a-dirty
redis_mode:standalone
os:Linux 5.4.72-microsoft-standard-WSL2 x86_64
arch_bits:64
...
# Keyspace
db0:keys=2,expires=0,avg_ttl=0
Description: Returns information and statistics about the server, organized into sections such as server, clients, memory, persistence, stats, replication, cpu, commandstats, latencystats, sentinel, cluster, modules, keyspace, and errorstats. Time Complexity: O(1)
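Because SCAN is cursor-based, a full iteration repeats the call until the server returns cursor 0. A minimal redis-py sketch (the my* pattern is just an example):

    import redis

    r = redis.Redis(decode_responses=True)

    # Iterate the keyspace in batches; COUNT is only a hint to the server.
    cursor = 0
    while True:
        cursor, keys = r.scan(cursor=cursor, match="my*", count=100)
        for key in keys:
            print(key)
        if cursor == 0:  # a zero cursor marks the end of the iteration
            break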

    Hashes

HSET
Syntax: HSET key field value [field value ...]
Example: HSET h_employee_profile:101 name "Nicol" age 33
Output: (integer) 2
Description: Sets the specified fields to their respective values in the hash stored at key. Time Complexity: O(N)

HGET
Syntax: HGET key field
Example: HGET h_employee_profile:101 name
Output: "Nicol"
Description: Returns the value associated with field in the hash stored at key. Time Complexity: O(1)

HGETALL
Syntax: HGETALL key
Example: HGETALL h_employee_profile:101
Output: 1) "name" 2) "Nicol" 3) "age" 4) "33"
Description: Returns all fields and values of the hash stored at key. Time Complexity: O(N)

HMGET
Syntax: HMGET key field [field ...]
Example: HMGET h_employee_profile:101 name age
Output: 1) "Nicol" 2) "33"
Description: Returns the values associated with the specified fields in the hash stored at key. Time Complexity: O(N)
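Hashes map naturally onto flat objects. A short redis-py sketch of the commands above, reusing the cheat sheet's key and field names:

    import redis

    r = redis.Redis(decode_responses=True)

    r.hset("h_employee_profile:101", mapping={"name": "Nicol", "age": 33})
    print(r.hget("h_employee_profile:101", "name"))          # Nicol
    print(r.hgetall("h_employee_profile:101"))               # {'name': 'Nicol', 'age': '33'}
    print(r.hmget("h_employee_profile:101", "name", "age"))  # ['Nicol', '33']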

    Sets

SADD
Syntax: SADD key member [member ...]
Example: SADD mySet "Hello"
Output: (integer) 1
Description: Adds the specified members to the set stored at key. Time Complexity: O(N)

SMEMBERS
Syntax: SMEMBERS key
Example: SMEMBERS mySet
Output: 1) "Hello"
Description: Returns all the members of the set value stored at key. Time Complexity: O(N)

SCARD
Syntax: SCARD key
Example: SCARD mySet
Output: (integer) 1
Description: Returns the set cardinality (number of elements) of the set stored at key. Time Complexity: O(1)

SISMEMBER
Syntax: SISMEMBER key member
Example: SISMEMBER mySet "Hello"
Output: (integer) 1
Description: Returns whether member is a member of the set stored at key. Time Complexity: O(1)

SDIFF
Syntax: SDIFF key [key ...]
Example: SDIFF mySet myOtherSet
Output: 1) "Hello"
Description: Returns the members of the set resulting from the difference between the first set and all the successive sets. Time Complexity: O(N)

SDIFFSTORE
Syntax: SDIFFSTORE destination key [key ...]
Example: SDIFFSTORE myNewSet mySet myOtherSet
Output: (integer) 1
Description: Equal to SDIFF, but instead of returning the resulting set, it is stored in destination. Time Complexity: O(N)

SREM
Syntax: SREM key member [member ...]
Example: SREM mySet "Hello"
Output: (integer) 1
Description: Removes the specified members from the set stored at key. Time Complexity: O(N)
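The set operations compose well. A redis-py sketch mirroring the examples above:

    import redis

    r = redis.Redis(decode_responses=True)

    r.sadd("mySet", "Hello", "World")
    r.sadd("myOtherSet", "World")
    print(r.scard("mySet"))                # 2
    print(r.sismember("mySet", "Hello"))   # True
    print(r.sdiff("mySet", "myOtherSet"))  # {'Hello'}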

    Sorted sets

ZADD
Syntax: ZADD key score member [score member ...]
Example: ZADD myZSet 1 "one" 2 "two"
Output: (integer) 2
Description: Adds all the specified members with the specified scores to the sorted set stored at key. Time Complexity: O(log(N))

ZRANGE
Syntax: ZRANGE key start stop [WITHSCORES]
Example: ZRANGE myZSet 0 -1
Output: 1) "one" 2) "two"
Description: Returns the specified range of elements in the sorted set stored at key. Time Complexity: O(log(N)+M) where M is the number of elements returned
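A common use of sorted sets is a leaderboard: scores order the members, and ZRANGE reads them back in rank order. A redis-py sketch of the examples above:

    import redis

    r = redis.Redis(decode_responses=True)

    r.zadd("myZSet", {"one": 1, "two": 2})
    # withscores=True returns (member, score) pairs in ascending score order.
    print(r.zrange("myZSet", 0, -1, withscores=True))  # [('one', 1.0), ('two', 2.0)]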

    Lists

LPUSH
Syntax: LPUSH key value [value ...]
Example: LPUSH myList "World"
Output: (integer) 1
Description: Inserts the specified values at the head of the list stored at key. Time Complexity: O(N)

RPUSH
Syntax: RPUSH key value [value ...]
Example: RPUSH myList "Hello"
Output: (integer) 2
Description: Inserts the specified values at the tail of the list stored at key. Time Complexity: O(N)

LRANGE
Syntax: LRANGE key start stop
Example: LRANGE myList 0 -1
Output: 1) "World" 2) "Hello"
Description: Returns the specified elements of the list stored at key. Time Complexity: O(S+N) where S is the distance of start and N is the number of elements in the specified range.

LLEN
Syntax: LLEN key
Example: LLEN myList
Output: (integer) 2
Description: Returns the length of the list stored at key. Time Complexity: O(1)

LPOP
Syntax: LPOP key [count]
Example: LPOP myList
Output: "World"
Description: Removes and returns the first element of the list stored at key. Time Complexity: O(N)

RPOP
Syntax: RPOP key [count]
Example: RPOP myList
Output: "Hello"
Description: Removes and returns the last element of the list stored at key. Time Complexity: O(N)
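Combining RPUSH with LPOP gives FIFO queue semantics (LPUSH with LPOP gives a stack). A redis-py sketch:

    import redis

    r = redis.Redis(decode_responses=True)

    r.rpush("myList", "job1", "job2")  # enqueue at the tail
    print(r.lpop("myList"))            # dequeue from the head -> job1
    print(r.lrange("myList", 0, -1))   # remaining -> ['job2']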

    Streams

XADD
Syntax: XADD key <* | ID> field value [field value ...]
Example: XADD myStream * sensorId "1234" temperature "19.8"
Output: 1518951480106-0
Description: Appends the specified stream entry to the stream at the specified key; * auto-generates the entry ID. Time Complexity: O(1) when adding a new entry.

XREAD
Syntax: XREAD [COUNT count] [BLOCK milliseconds] STREAMS key [key ...] ID [ID ...]
Example: XREAD COUNT 2 STREAMS myStream 0
Output: 1) 1) "myStream" 2) 1) 1) "1518951480106-0" 2) 1) "sensorId" 2) "1234" 3) "temperature" 4) "19.8"
Description: Reads data from one or multiple streams, returning only entries with an ID greater than the last received ID reported by the caller.

XRANGE
Syntax: XRANGE key start end [COUNT count]
Example: XRANGE myStream 1518951480106-0 1518951480106-0
Output: 1) 1) 1) "1518951480106-0" 2) 1) "sensorId" 2) "1234" 3) "temperature" 4) "19.8"
Description: Returns the entries matching a range of IDs in a stream. Time Complexity: O(N) with N being the number of elements returned. If N is constant (e.g. always asking for the first 10 elements with COUNT), you can consider it O(1).

XLEN
Syntax: XLEN key
Example: XLEN myStream
Output: (integer) 1
Description: Returns the number of entries of a stream. Time Complexity: O(1)

XDEL
Syntax: XDEL key ID [ID ...]
Example: XDEL myStream 1518951480106-0
Output: (integer) 1
Description: Removes the specified entries from a stream. Time Complexity: O(1) for each single item to delete in the stream

XTRIM
Syntax: XTRIM key MAXLEN [~] count
Example: XTRIM myStream MAXLEN 0
Output: (integer) 0
Description: Trims the stream to a given length. Time Complexity: O(N), with N being the number of evicted entries. Constant times are very small, however, since entries are organized in macro nodes containing multiple entries that can be released with a single deallocation.
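A minimal producer/consumer pair over a stream in redis-py (a sketch; stream and field names follow the examples above):

    import redis

    r = redis.Redis(decode_responses=True)

    # Producer: '*' lets Redis auto-generate the entry ID.
    entry_id = r.xadd("myStream", {"sensorId": "1234", "temperature": "19.8"})

    # Consumer: read up to 10 entries with IDs greater than 0 (i.e., from the start).
    for stream, entries in r.xread({"myStream": 0}, count=10):
        for eid, fields in entries:
            print(stream, eid, fields)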

     


Redis Stack commands

Redis Stack extends the core features of Redis OSS with capabilities such as querying across hashes and JSON documents, time series data support, and full-text search.

    JSON

JSON.SET
Syntax: JSON.SET key path value
Example: JSON.SET employee_profile:1 . '{"name":"Alice"}'
Output: OK
Description: Sets the JSON value at path in key. Time Complexity: O(M+N) where M is the original size and N is the new size

JSON.GET
Syntax: JSON.GET key [path [path ...]]
Example: JSON.GET employee_profile:1
Output: {"name":"Alice"}
Description: Returns the JSON value at path in key. Time Complexity: O(N) when path is evaluated to a single value, where N is the size of the value; O(N) when path is evaluated to multiple values, where N is the size of the key

JSON.NUMINCRBY
Syntax: JSON.NUMINCRBY key path number
Example:
JSON.SET employee_profile:1 .age 30
JSON.NUMINCRBY employee_profile:1 .age 5
Output: 35
Description: Increments a number inside a JSON document. Time Complexity: O(1) when path is evaluated to a single value; O(N) when path is evaluated to multiple values, where N is the size of the key

JSON.OBJKEYS
Syntax: JSON.OBJKEYS key [path]
Example: JSON.OBJKEYS employee_profile:1
Output: 1) "name" 2) "age"
Description: Returns the keys in the object referenced by path. Time Complexity: O(N) when path is evaluated to a single value, where N is the number of keys in the object; O(N) when path is evaluated to multiple values, where N is the size of the key

JSON.OBJLEN
Syntax: JSON.OBJLEN key [path]
Example: JSON.OBJLEN employee_profile:1
Output: (integer) 2
Description: Reports the number of keys in the JSON object at path in key. Time Complexity: O(1) when path is evaluated to a single value; O(N) when path is evaluated to multiple values, where N is the size of the key

JSON.ARRAPPEND
Syntax: JSON.ARRAPPEND key [path] value [value ...]
Example:
JSON.SET employee_profile:1 .colors '["red", "green", "blue"]'
JSON.ARRAPPEND employee_profile:1 .colors '"yellow"'
Output: (integer) 4
Description: Appends the JSON values to the array at path, after its last element. Time Complexity: O(1) for each value added; O(N) for multiple values added, where N is the size of the key

JSON.ARRINSERT
Syntax: JSON.ARRINSERT key path index value [value ...]
Example: JSON.ARRINSERT employee_profile:1 .colors 2 '"purple"'
Output: (integer) 5
Description: Inserts the JSON values into the array at path before the index (shifts to the right). Time Complexity: O(N) when path is evaluated to a single value, where N is the size of the array; O(N) when path is evaluated to multiple values, where N is the size of the key

JSON.ARRINDEX
Syntax: JSON.ARRINDEX key path value [start [stop]]
Example: JSON.ARRINDEX employee_profile:1 .colors '"purple"'
Output: (integer) 2
Description: Searches for the first occurrence of a JSON value in an array. Time Complexity: O(N) when path is evaluated to a single value, where N is the size of the array; O(N) when path is evaluated to multiple values, where N is the size of the key
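The JSON commands are exposed through the json() helper in redis-py (a sketch, assuming a server with the JSON module loaded, e.g. Redis Stack):

    import redis

    r = redis.Redis(decode_responses=True)

    r.json().set("employee_profile:1", "$", {"name": "Alice", "age": 30})
    print(r.json().get("employee_profile:1", "$.name"))   # ['Alice']
    r.json().numincrby("employee_profile:1", "$.age", 5)  # age is now 35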

    Search and Query

FT.CREATE
Syntax:
FT.CREATE index
  [ON HASH | JSON]
  [PREFIX count prefix [prefix ...]]
  [FILTER {filter}]
  SCHEMA
  field_name [AS alias] TEXT | TAG | NUMERIC | GEO | VECTOR | GEOSHAPE [SORTABLE [UNF]] [NOINDEX]
  ...
Example:
FT.CREATE staff:index
  ON JSON
  PREFIX 1 staff:
  SCHEMA
  "$.name" AS name TEXT
  "$.age" AS age NUMERIC
  "$.isSingle" AS isSingle TAG
  '$["skills"][*]' AS skills TAG SEPARATOR "|"
Output: OK
Description: Creates an index with the given specification. Time Complexity: O(K) where K is the number of fields in the document; O(N) for the keys in the keyspace

FT.SEARCH
Syntax:
FT.SEARCH index query
  [FILTER numeric_field min max [FILTER numeric_field min max ...]]
  [RETURN count identifier [AS property] [identifier [AS property] ...]]
  [SORTBY sortby [ASC | DESC] [WITHCOUNT]]
  [LIMIT offset num]
  [PARAMS nargs name value [name value ...]]
Example:
JSON.SET "staff:1" "$" '{"name":"Bob","age":22,"isSingle":true,"skills":["NodeJS","MongoDB","React"]}'
JSON.SET "staff:2" "$" '{"name":"Alex","age":45,"isSingle":true,"skills":["Python","MySQL","Angular"]}'
FT.SEARCH staff:index "(@name:'alex')" RETURN 1 $ LIMIT 0 10
FT.SEARCH staff:index "((@isSingle:{true}) (@age:[(18 +inf]))" RETURN 1 $ LIMIT 0 10
Output: Matching documents data
Description: Searches the index with a query, returning either documents or just ids. Time Complexity: O(N)

FT.AGGREGATE
Syntax:
FT.AGGREGATE index query
  [LOAD count field [field ...]]
  [GROUPBY nargs property [property ...] [REDUCE function nargs arg [arg ...] [AS name]] ...]
  [SORTBY nargs [property ASC | DESC [property ASC | DESC ...]] [MAX num] [WITHCOUNT]]
  [APPLY expression AS name ...]
  [LIMIT offset num]
  [FILTER filter]
  [PARAMS nargs name value [name value ...]]
Example:
FT.AGGREGATE staff:index "(@age:[(18 +inf])"
  GROUPBY 1 @age
  REDUCE COUNT_DISTINCT 1 @name AS staff_count
Output:
| age | staff_count |
| --- | ----------- |
| 22  | 1           |
| 45  | 1           |
Description: Runs a search query on an index and performs aggregate transformations on the results.

FT.INFO
Syntax: FT.INFO index
Example: FT.INFO staff:index
Output: A list of configuration parameters and stats for the index.
Description: Returns information and statistics on the index. Time Complexity: O(1)

FT.DROPINDEX
Syntax: FT.DROPINDEX index [DD]
Example: FT.DROPINDEX staff:index
Output: OK
Description: Drops an existing index. Time Complexity: O(1), or O(N) if documents are deleted, where N is the number of keys in the keyspace
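The same index can be created and queried from redis-py (a sketch; the search module paths shown here vary slightly across redis-py versions):

    import redis
    from redis.commands.search.field import NumericField, TextField
    from redis.commands.search.indexDefinition import IndexDefinition, IndexType
    from redis.commands.search.query import Query

    r = redis.Redis(decode_responses=True)

    # Index JSON documents under the staff: prefix, as in the FT.CREATE example.
    r.ft("staff:index").create_index(
        (TextField("$.name", as_name="name"), NumericField("$.age", as_name="age")),
        definition=IndexDefinition(prefix=["staff:"], index_type=IndexType.JSON),
    )

    r.json().set("staff:1", "$", {"name": "Bob", "age": 22})
    res = r.ft("staff:index").search(Query("@name:Bob"))
    print(res.total, res.docs)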

    Getting Started


Authors: Prasan Kumar, Technical Solutions Developer at Redis; Will Johnston, Developer Growth Manager at Redis

Welcome to the getting started guide for the official Redis Developer Hub!

    If you are new to Redis, we recommend starting with Redis University (RU101). RU101 is an introductory course, perfect for developers new to Redis. In this course, you’ll learn about the data structures in Redis, and you’ll see how to practically apply them in the real world.

    If you have questions related to Redis, come join the Redis Discord server. Our Discord server is a place where you can learn, share, and collaborate about anything and everything Redis. Connect with users from the community and Redis University. Get your questions answered and learn cool new tips and tricks! Watch for notifications of the latest content from Redis and the community. And share your own content with the community.

Set up Redis

    There are essentially two ways you can use Redis:

    • Cloud Redis: A hosted and serverless Redis database-as-a-service (DBaaS). The fastest way to deploy Redis Enterprise via Amazon AWS, Google Cloud Platform, or Microsoft Azure.
• On-prem/local Redis: Self-managed Redis using your own server and any operating system (macOS, Windows, or Linux).

If you choose to use local Redis, we strongly recommend using Docker. If you choose not to use Docker, follow the installation instructions for your operating system.

    The docker run command below exposes redis-server on port 6379 and RedisInsight on port 8001. You can use RedisInsight by pointing your browser to http://localhost:8001.

    # install
    $ docker run -d --name redis-stack -p 6379:6379 -p 8001:8001 redis/redis-stack:latest

You can use redis-cli to connect to the server at localhost:6379. If you don't have redis-cli installed locally, you can run it from the Docker container as shown below:

    # connect
    $ docker exec -it redis-stack redis-cli

    Detailed Docker instructions can be viewed here

    Basic Querying with Redis

    • Connect to Redis using CLI or RedisInsight (a GUI tool to visualize data & run commands)

RedisInsight

    # syntax 1 : connect using host & port, followed by password
    $ redis-cli -h host -p port
    > AUTH password
    OK

    # example 1
    $ redis-cli -h redis15.localnet.org -p 6390
    > AUTH myUnguessablePassword
    OK

    # syntax 2 : connect using uri
    $ redis-cli -u redis://user:password@host:port/dbnum

    # example 2
    $ redis-cli -u redis://LJenkins:p%40ssw0rd@redis-16379.hosted.com:16379/0

    • Basic CLI / RedisInsight workbench commands
    # syntax : Check specific keys
    > KEYS pattern

    # example
    > KEYS *

    #------------
    # syntax : Check number of keys in database
    > DBSIZE

    #------------
    # syntax : set a key value
    > SET key value EX expirySeconds

    # example
    > SET company redis EX 60

    #------------
    # syntax : get value by key
    > GET key

    # example
    > GET company

    #------------
    # syntax : delete keys
    > DEL key1 key2 key3 ... keyN

    # example
    > DEL company

    #------------
    # syntax : Check if key exists
    > EXISTS key1

    # example
    > EXISTS company

    #------------
    # syntax : set expiry to key
    > EXPIRE key seconds

    # example
    > EXPIRE lastname 60

    #------------
    # syntax : remove expiry from key
    > PERSIST key

    # example
    > PERSIST lastname

    #------------
    # syntax : find (remaining) time to live of a key
    > TTL key

    # example
    > TTL lastname

    #------------
    # syntax : increment a number
    > INCR key

    # example
    > INCR counter

    #------------
    # syntax : decrement a number
    > DECR key

    # example
    > DECR counter

    Detailed CLI instructions can be viewed here and commands can be checked here
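If you'd rather drive the same commands from code, here is a minimal sketch using the redis-py client (an assumption on our part; any Redis client library exposes equivalent calls):

import redis

r = redis.Redis(host="localhost", port=6379)

r.set("company", "redis", ex=60)  # SET company redis EX 60
print(r.get("company"))           # GET company
print(r.ttl("company"))           # TTL company
print(r.incr("counter"))          # INCR counter
r.delete("company")               # DEL company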

    Secondary Indexing and Searching with Redis

    Redis Stack enables the JSON data type in Redis.

    # syntax : set an object value to a key
    > JSON.SET objKey $ value

    # example
    > JSON.SET person $ '{"name":"Leonard Cohen","dob":1478476800,"isActive": true, "hobbies":["music", "cricket"]}'

    #------------
    # syntax : get object value of a key
    > JSON.GET objKey $

    # example
    > JSON.GET person $

    #------------
    # syntax : find object key length
    > JSON.OBJLEN objKey $

    # example
    > JSON.OBJLEN person $

    #------------
    # syntax : find object keys
    > JSON.OBJKEYS objKey $

    # example
    > JSON.OBJKEYS person $

    #------------
    # syntax : update nested property
    > JSON.SET objKey $.prop value

    # example
    > JSON.SET person $.name '"Alex"'

    #------------
    # syntax : update nested array
    > JSON.SET objKey $.arrayProp fullValue
    > JSON.SET objKey $.arrayProp[index] value

    # example
    > JSON.SET person $.hobbies '["music", "cricket"]'
    > JSON.SET person $.hobbies[1] '"dance"'

    #------------
    # syntax : remove nested array item by index
    > JSON.ARRPOP objKey $.arrayProp index

    # example
    > JSON.ARRPOP person $.hobbies 1

    More details can be found in the Redis Stack docs


    Redis Stack enables a query and indexing engine for Redis, providing secondary indexing, full-text search and aggregations capabilities.

• We have to create an index on a schema to be able to search its data
    # syntax
    > FT.CREATE {index_name} ON JSON PREFIX {count} {prefix} SCHEMA {json_path} AS {attribute} {type}
    # NOTE: attribute = logical name, json_path = JSONPath expressions

    # example
> FT.CREATE userIdx ON JSON PREFIX 1 users: SCHEMA $.user.name AS name TEXT $.user.hobbies AS hobbies TAG $.user.age AS age NUMERIC
    # NOTE: You can search by any attribute mentioned in the above index for keys that start with users: (e.g. users:1).
    • More details on Indexing JSON can be found here

Once the index is created, any pre-existing, new, or modified JSON document is automatically indexed.

// sample JSON document
{
  "user": {
    "name": "John Smith",
    "hobbies": "foo,bar",
    "age": 23
  }
}
    # adding JSON document
    > JSON.SET myDoc $ '{"user":{"name":"John Smith","hobbies":"foo,bar","age":23}}'
    • Search
    # search all user documents with name 'John'
    > FT.SEARCH userIdx '@name:(John)'
    1) (integer) 1
    2) "myDoc"
    3) 1) "$"
    2) {"user":{"name":"John Smith","hobbies":"foo,bar","age":23}}"
    • Search & project required fields
    # search documents with name 'John' & project only age field
    > FT.SEARCH userIdx '@name:(John)' RETURN 1 $.user.age
    1) (integer) 1
    2) "myDoc"
    3) 1) "$.user.age"
    2) "23"
    # project multiple fields
    > FT.SEARCH userIdx '@name:(John)' RETURN 2 $.user.age $.user.name
    1) (integer) 1
    2) "myDoc"
    3) 1) "$.user.age"
    2) "23"
    3) "$.user.name"
    4) "John Smith"

    #------------
    # project with alias name
    > FT.SEARCH userIdx '@name:(John)' RETURN 3 $.user.age AS userAge

    1) (integer) 1
    2) "myDoc"
    3) 1) "userAge"
    2) "23"
    #------------

    # multi field query
    > FT.SEARCH userIdx '@name:(John) @hobbies:{foo | me} @age:[20 30]'
    1) (integer) 1
    2) "myDoc"
    3) 1) "$"
    2) {"user":{"name":"John Smith","hobbies":"foo,bar","age":23}}"

    More details on query syntax

    • Drop index
    > FT.DROPINDEX userIdx

    Useful Resources

    1. Redis and JSON explained (Revisited in 2022) video
    2. Searching with Redis Stack
    3. Redis University 204, Storing, Querying, and Indexing JSON at Speed

    Sync Redis with Other Databases

    RedisGears adds a dynamic execution framework for your Redis data that enables you to write and execute functions that implement data flows in Redis.

Consider the following example, which syncs data with MongoDB.

• Create the Python file below and update the MongoDB connection details, database, collection, and primary key name to be synced
    write-behind.py
    # Gears Recipe for a single write behind

    # import redis gears & mongo db libs
    from rgsync import RGJSONWriteBehind, RGJSONWriteThrough
    from rgsync.Connectors import MongoConnector, MongoConnection

    # change mongodb connection
    connection = MongoConnection("", "", "", "", "ENV_MONGODB_CONNECTION_URL")

    # change MongoDB database
    db = 'ENV_DB_NAME'

# change MongoDB collection & its primary key
    collection1Connector = MongoConnector(connection, db, 'ENV_COLLECTION1_NAME', 'ENV_COLLECTION1_PRIMARY_KEY')

    # change redis keys with prefix that must be synced with mongodb collection
    RGJSONWriteBehind(GB, keysPrefix='ENV_COLLECTION1_PREFIX_KEY',
    connector=collection1Connector, name='Collection1WriteBehind',
    version='99.99.99')
Example environment values:

ENV_MONGODB_CONNECTION_URL=mongodb://usrAdmin:passwordAdmin@10.10.20.2:27017/dbSpeedMernDemo?authSource=admin
ENV_DB_NAME=dbSpeedMernDemo
ENV_COLLECTION1_NAME=movies
ENV_COLLECTION1_PRIMARY_KEY=movieId
ENV_COLLECTION1_PREFIX_KEY=movie

    The code above demonstrates how you would sync a "movies" collection in MongoDB with Redis using the "movie" key prefix.

    To get this working you first need to load the python file into redis-server:

    $ redis-cli rg.pyexecute "`cat write-behind.py`" REQUIREMENTS rgsync pymongo==3.12.0

Now, insert a JSON item into Redis starting with the prefix specified in the Python file (i.e. "movie"):

    # redis-cli command
    > JSON.SET movie:123 $ '{"movieId":123,"name":"RRR","isActive": true}'

    Now, verify whether the JSON is inserted into MongoDB.
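As a sketch, one way to verify is with the pymongo client, reusing the connection details from the example environment values above:

from pymongo import MongoClient

client = MongoClient("mongodb://usrAdmin:passwordAdmin@10.10.20.2:27017/dbSpeedMernDemo?authSource=admin")
doc = client["dbSpeedMernDemo"]["movies"].find_one({"movieId": 123})
print(doc)  # should print the movie document once the write-behind has synced it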

    Additional Resources For Syncing with Redis and Other Databases

    1. Redis gear sync with MongoDB
    2. RG.PYEXECUTE
    3. rgsync
    4. gears-cli
    5. RedisGears dynamic script

    Probabilistic Data and Queries with Redis

    Redis Stack supports probabilistic datatypes and queries. Below you will find a stock leaderboard example:

    # Reserve a new leaderboard filter
    > TOPK.RESERVE trending-stocks 12 50 4 0.9
    "OK"

# Add new entries to the leaderboard
    > TOPK.ADD trending-stocks AAPL AMD MSFT INTC GOOG FB NFLX GME AMC TSLA
    1) "null" ...

    # Get the leaderboard
    > TOPK.LIST trending-stocks
    1) "AAPL"
    2) "AMD"
    2) "MSFT" ...

    # Get information about the leaderboard
    > TOPK.INFO trending-stocks
    1) "k"
    2) "12"
    3) "width"
    4) "50"
    5) "depth"
    6) "4"
    7) "decay"
    8) "0.90000000000000002"

    More details in docs

    TimeSeries Data and Queries with Redis

    Redis Stack supports time-series use cases such as IoT, stock prices, and telemetry. You can ingest and query millions of samples and events at the speed of Redis. You can also use a variety of queries for visualization and monitoring with built-in connectors to popular tools like Grafana, Prometheus, and Telegraf.

    The following example demonstrates how you might store temperature sensor readings in Redis Stack:

    # Create new time-series, for example temperature readings
    > TS.CREATE temperature:raw DUPLICATE_POLICY LAST
    "OK"

    # Create a bucket for monthly aggregation
    > TS.CREATE temperature:monthly DUPLICATE_POLICY LAST
    "OK"

# Automatically aggregate based on time-weighted average; the 2629800000 ms bucket is one average month (30.4375 days)
    > TS.CREATERULE temperature:raw temperature:monthly AGGREGATION twa 2629800000
    "OK"

    # Add data to the raw time-series
    > TS.MADD temperature:raw 1621666800000 52 ...
    1) "1621666800000" ...

    # View the monthly time-weighted average temperatures
    > TS.RANGE temperature:monthly 0 +
    1) 1) "1621666800000"
    2) "52" ...

    # Delete compaction rule
    > TS.DELETERULE temperature:raw temperature:monthly
    "OK"

    # Delete partial time-series
    > TS.DEL temperature:raw 0 1621666800000
    (integer) 1

    More details in docs

    Additional Resources

How to build a Rate Limiter using Redis

The application will return the following headers after each request, letting the user know how many requests they have remaining before they run over the limit. On the 10th request, the server should return an HTTP status code of 429 Too Many Requests.

SETNX is short for "SET if Not eXists". It sets the key to hold a string value only if the key does not exist; in that case, it is equivalent to SET. When the key already holds a value, no operation is performed. For each new client, a counter keyed by IP and route is created, as shown below:

     SETNX your_ip:PING limit_amount
    Example: SETNX 127.0.0.1:PING 10

    More information

Set a timeout on the key:

     EXPIRE your_ip:PING timeout
    Example: EXPIRE 127.0.0.1:PING 1000

    More information

    How the data is accessed:

On each subsequent request, the remaining count is read from the bucket:

     GET your_ip:PING
    Example: GET 127.0.0.1:PING

    More information

Each request then decrements the bucket:

     DECRBY your_ip:PING amount
    Example: DECRBY 127.0.0.1:PING 1

    More information
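Putting the four commands together, here is a minimal sketch of the same fixed-window limiter in Python using the redis-py client (an assumption; the Launchpad demo itself is a separate application):

import redis

r = redis.Redis()

LIMIT = 10      # allowed requests per window, as in SETNX 127.0.0.1:PING 10
WINDOW = 1000   # window length in seconds, as in EXPIRE 127.0.0.1:PING 1000

def is_allowed(ip: str, route: str) -> bool:
    key = f"{ip}:{route}"
    # SETNX creates the bucket only for the first request in a window
    if r.setnx(key, LIMIT):
        r.expire(key, WINDOW)
    # DECRBY consumes one request and returns the remaining budget
    return r.decrby(key, 1) >= 0

for i in range(12):
    status = "OK" if is_allowed("127.0.0.1", "PING") else "429 Too Many Requests"
    print(i + 1, status)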

    References

    Redis Launchpad

    Benchmarks for BERT Large Question Answering inference for RedisAI and RedisGears


    Profile picture for Alex Mikhalev
    Author:
    Alex Mikhalev, AI/ML Architect at Nationwide Building Society
    Profile picture for Will Johnston
    Author:
    Will Johnston, Developer Growth Manager at Redis

    Introduction

    In this article, we will explore the challenges and opportunities associated with deploying large BERT Question Answering Transformer models from Hugging Face, using RedisGears and RedisAI to perform a lot of the heavy lifting while also leveraging the in-memory datastore Redis.

    Why do we need RedisAI?

    In data science workloads:

    • You want to load high-performance hardware as close to 100% as possible
    • You prefer to re-calculate results

    However, in a client-facing application:

    • You want to be able to distribute the load evenly, so it never reaches 100%, and client-facing servers can perform additional functions
    • You prefer to cache results of previous calculations and fetch data from the cache as fast as possible to provide a seamless customer experience

    So before we go any further, why should you read this article? Here are some numbers for inspiration:

    First:

    python3 transformers_plain_bert_qa.py
    airborne transmission of respiratory infections is the lack of established methods for the detection of airborne respiratory microorganisms
    10.351818372 seconds

The above script uses a slightly modified transformer from the default pipeline for BERT QA, and running it on the server takes 10 seconds. The server uses the latest 12th Gen Intel(R) Core(TM) i9-12900K (full cpuinfo).

    However:

    time curl -i -H "Content-Type: application/json" -X POST -d '{"search":"Who performs viral transmission among adults"}' http://localhost:8080/qasearch

    real 0m0.747s
    user 0m0.004s
    sys 0m0.000s

The script runs BERT QA inference on each shard (by default, the number of shards equals the number of available CPUs) and returns answers in under one second.

    Incredible, right? Let's dive in!

    Background

In BERT Question Answering inference, the ML model selects an answer from a given text. In other words, BERT QA "thinks" through the following: "What is the answer to the question, assuming the answer exists within the selected paragraph?"

    So it's important to select text potentially containing an answer. A typical pattern is to use Wikipedia data to build Open Domain Question Answering.

    Our QA system is a medical domain-specific question/answering pipeline, hence we need a first pipeline that turns data into a knowledge graph. This NLP pipeline is available at Redis LaunchPad, is fully open source, and is described in a previous article. Here is a 5 minute video describing it, and below you will find an architectural overview:

    featured

    BERT Question Answering pipeline and API

    In the BERT QA pipeline (or in any other modern NLP inference task), there are two steps:

    1. Tokenize text - turn text into numbers
    2. Run the inference - large matrix multiplication

    With Redis, we have the opportunity to pre-compute everything and store it in memory, but how do we do it? Unlike with the summarization ML learning task, the question is not known in advance, so we can't pre-compute all possible answers. However, we can pre-tokenize all potential answers (i.e. all paragraphs in the dataset) using RedisGears:

def parse_sentence(record):
    import redisAI
    import numpy as np
    global tokenizer
    if not tokenizer:
        tokenizer = loadTokeniser()
    hash_tag = "{%s}" % hashtag()

    for idx, value in sorted(record['value'].items(), key=lambda item: int(item[0])):
        tokens = tokenizer.encode(value, add_special_tokens=False, max_length=511, truncation=True, return_tensors="np")
        tokens = np.append(tokens, tokenizer.sep_token_id).astype(np.int64)
        tensor = redisAI.createTensorFromBlob('INT64', tokens.shape, tokens.tobytes())

        key_prefix = 'sentence:'
        sentence_key = remove_prefix(record['key'], key_prefix)
        token_key = f"tokenized:bert:qa:{sentence_key}:{idx}"
        redisAI.setTensorInKey(token_key, tensor)
        execute('SADD', f'processed_docs_stage3_tokenized{hash_tag}', token_key)

    See the full code on GitHub.

    Then for each Redis Cluster shard, we pre-load the BERT QA model by downloading, exporting it into torchscript, then loading it into each shard:

def load_bert():
    model_file = 'traced_bert_qa.pt'

    with open(model_file, 'rb') as f:
        model = f.read()

    startup_nodes = [
        {"host": "127.0.0.1", "port": "30001"},
        {"host": "127.0.0.1", "port": "30002"},
        {"host": "127.0.0.1", "port": "30003"},
    ]
    cc = ClusterClient(startup_nodes=startup_nodes)
    hash_tags = cc.execute_command("RG.PYEXECUTE", "gb = GB('ShardsIDReader').map(lambda x:hashtag()).run()")[0]
    print(hash_tags)
    for hash_tag in hash_tags:
        print("Loading model bert-qa{%s}" % hash_tag.decode('utf-8'))
        cc.modelset('bert-qa{%s}' % hash_tag.decode('utf-8'), 'TORCH', 'CPU', model)
        print(cc.infoget('bert-qa{%s}' % hash_tag.decode('utf-8')))

    The full code is available on GitHub.
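The download-and-export step that produces traced_bert_qa.pt is in the repository linked above; a minimal sketch of it, assuming the Hugging Face transformers and torch packages and the standard SQuAD-finetuned BERT-large checkpoint, might look like this:

import torch
from transformers import BertForQuestionAnswering

# torchscript=True prepares the model for tracing (illustrative sketch;
# the project's actual export script is on GitHub)
model = BertForQuestionAnswering.from_pretrained(
    'bert-large-uncased-whole-word-masking-finetuned-squad', torchscript=True)
model.eval()

# trace with dummy inputs shaped like (input_ids, attention_mask, token_type_ids)
dummy = torch.ones(1, 512, dtype=torch.int64)
traced = torch.jit.trace(model, (dummy, dummy, dummy))
torch.jit.save(traced, 'traced_bert_qa.pt')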

    And when a question comes from the user, we tokenize and append the question to the list of potential answers before running the RedisAI model:

        token_key = f"tokenized:bert:qa:{sentence_key}"
    # encode question
    input_ids_question = tokenizer.encode(question, add_special_tokens=True, truncation=True, return_tensors="np")
    t=redisAI.getTensorFromKey(token_key)
    input_ids_context=to_np(t,np.int64)
    # merge (append) with potential answer, context - is pre-tokenized paragraph
    input_ids = np.append(input_ids_question,input_ids_context)
    attention_mask = np.array([[1]*len(input_ids)])
    input_idss=np.array([input_ids])
    num_seg_a=input_ids_question.shape[1]
    num_seg_b=input_ids_context.shape[0]
    token_type_ids = np.array([0]*num_seg_a + [1]*num_seg_b)
    # create actual model runner for RedisAI
    modelRunner = redisAI.createModelRunner(f'bert-qa{hash_tag}')
    # make sure all types are correct
    input_idss_ts=redisAI.createTensorFromBlob('INT64', input_idss.shape, input_idss.tobytes())
    attention_mask_ts=redisAI.createTensorFromBlob('INT64', attention_mask.shape, attention_mask.tobytes())
    token_type_ids_ts=redisAI.createTensorFromBlob('INT64', token_type_ids.shape, token_type_ids.tobytes())
    redisAI.modelRunnerAddInput(modelRunner, 'input_ids', input_idss_ts)
    redisAI.modelRunnerAddInput(modelRunner, 'attention_mask', attention_mask_ts)
    redisAI.modelRunnerAddInput(modelRunner, 'token_type_ids', token_type_ids_ts)
    redisAI.modelRunnerAddOutput(modelRunner, 'answer_start_scores')
    redisAI.modelRunnerAddOutput(modelRunner, 'answer_end_scores')
    # run RedisAI model runner
    res = await redisAI.modelRunnerRunAsync(modelRunner)
    answer_start_scores=to_np(res[0],np.float32)
    answer_end_scores = to_np(res[1],np.float32)
    answer_start = np.argmax(answer_start_scores)
    answer_end = np.argmax(answer_end_scores) + 1
    answer = tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(input_ids[answer_start:answer_end],skip_special_tokens = True))
    log("Answer "+str(answer))
    return answer

Check out the full code, available on GitHub.

    The process for making a BERT QA API call looks like this:

    Architecture Diagram for BERT QA RedisGears and RedisAI

Here I use two cool features of RedisGears: capturing events on key miss and using async/await to run RedisAI on each shard without locking the primary thread, so that the Redis Cluster can continue to serve other customers. For benchmarks, caching responses from RedisAI is disabled. If you are getting response times in nanoseconds on the second call rather than milliseconds, check to make sure the line linked above is commented out.
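For illustration, a rough sketch of such a registration might look like the following (the exact code lives in the GitHub repository linked above; process_key_miss and qa are hypothetical names here):

# register an async function on 'keymiss' events for bertqa* keys, so the
# answer is computed on demand without blocking the main Redis thread
async def process_key_miss(record):
    # record['key'] is the missing cache key, e.g.
    # "bertqa{6fd}_PMC169038.xml:{6fd}:33_<question>"
    answer = await qa(record['key'])  # tokenize + RedisAI model run, as above
    return answer

gb = GB('KeysReader')
gb.map(process_key_miss)
gb.register(prefix='bertqa*', eventTypes=['keymiss'], mode='async_local')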

    Running the Benchmark

    Pre-requisites for running the benchmark:

    Assuming you are running Debian or Ubuntu and have Docker and docker-compose installed (or can create a virtual environment via conda), run the following commands:

    git clone --recurse-submodules https://github.com/applied-knowledge-systems/the-pattern.git
    cd the-pattern
    ./bootstrap_benchmark.sh

    The above commands should end with a curl call to the qasearch API, since Redis caching is disabled for the benchmark.

    Next, invoke curl like this:

    time curl -i -H "Content-Type: application/json" -X POST -d '{"search":"Who performs viral transmission among adults"}' http://localhost:8080/qasearch

    Expect the following output, or something similar based on your runtime environment:

    HTTP/1.1 200 OK
    Server: nginx/1.18.0 (Ubuntu)
    Date: Sun, 29 May 2022 12:05:39 GMT
    Content-Type: application/json
    Content-Length: 2120
    Connection: keep-alive

    {"links":[{"created_at":"2002","rank":13,"source":"C0001486","target":"C0152083"}],"results":[{"answer":"adenovirus","sentence":"The medium of 40 T150 flasks of adenovirus transducer dec CAR CHO cells yielded 0 5 1 my of purified msCEACAM1a 1 4 protein","sentencekey":"sentence:PMC125375.xml:{mG}:202","title":"Crystal structure of murine sCEACAM1a[1,4]: a coronavirus receptor in the CEA family"}] OUTPUT_REDUCTED}

    I modified the output of the API for the benchmark to return results from all shards - even if the answer is empty. In the run above five shards return answers. The overall API call response takes less than one second with all additional hops to search in RedisGraph!

    Architecture Diagram for BERT QA API call

    Deep Dive into the Benchmark

    Let's dig deeper into what's happening under the hood:

You should have a sentence key with a shard id, which you get by looking at the "Cache key" in docker logs -f rgcluster. In my setup the cache key is "bertqa{6fd}_PMC169038.xml:{6fd}:33_Who performs viral transmission among adults". If you think it looks like a function call, that's because it is one. It is triggered when the key isn't present in the Redis Cluster, which for the benchmark will be every time, since we disabled caching of the output.

One more thing to figure out from the logs is the port of the shard corresponding to the hashtag, also known as the shard id. It is the text found between the curly brackets and looks like {6fd} above. The same will appear in the output of the export_load script. In my case the cache key was found in "30012.log", so my port is 30012.

    Next I run the following command:

redis-cli -c -p 30012 -h 127.0.0.1 get "bertqa{6fd}_PMC169038.xml:{6fd}:33_Who performs viral transmission among adults"

    and then run the benchmark:

    redis-benchmark -p 30012 -h 127.0.0.1 -n 10 get "bertqa{6fd}_PMC169038.xml:{6fd}:33_Who performs viral transmission among adults"
    ====== get bertqa{6fd}_PMC169038.xml:{6fd}:33_Who performs viral transmission among adults ======
    10 requests completed in 0.04 seconds
    50 parallel clients
    3 bytes payload
    keep alive: 1

    10.00% <= 41 milliseconds
    100.00% <= 41 milliseconds
    238.10 requests per second

If you are wondering, -n is the number of requests; in this case we issue 10. You can also add:

--csv if you want to output in CSV format

--precision 3 if you want more decimal places in the millisecond values

    More information about the benchmarking tool can be found on the redis.io Benchmarks page.

If you don't have the Redis utilities installed locally, you can use Docker as follows:

    docker exec -it rgcluster /bin/bash
    redis-benchmark -p 30012 -h 127.0.0.1 -n 10 get "bertqa{6fd}_PMC169038.xml:{6fd}:33_Who performs viral transmission among adults"
    ====== get bertqa{6fd}_PMC169038.xml:{6fd}:33_Who performs viral transmission among adults ======
    10 requests completed in 1.75 seconds
    50 parallel clients
    99 bytes payload
    keep alive: 1
    host configuration "save":
    host configuration "appendonly": no
    multi-thread: no

    Latency by percentile distribution:
    0.000% <= 243.711 milliseconds (cumulative count 1)
    50.000% <= 987.135 milliseconds (cumulative count 5)
    75.000% <= 1577.983 milliseconds (cumulative count 8)
    87.500% <= 1662.975 milliseconds (cumulative count 9)
    93.750% <= 1744.895 milliseconds (cumulative count 10)
    100.000% <= 1744.895 milliseconds (cumulative count 10)

    Cumulative distribution of latencies:
    0.000% <= 0.103 milliseconds (cumulative count 0)
    10.000% <= 244.223 milliseconds (cumulative count 1)
    20.000% <= 409.343 milliseconds (cumulative count 2)
    30.000% <= 575.487 milliseconds (cumulative count 3)
    40.000% <= 821.247 milliseconds (cumulative count 4)
    50.000% <= 987.135 milliseconds (cumulative count 5)
    60.000% <= 1157.119 milliseconds (cumulative count 6)
    70.000% <= 1497.087 milliseconds (cumulative count 7)
    80.000% <= 1577.983 milliseconds (cumulative count 8)
    90.000% <= 1662.975 milliseconds (cumulative count 9)
    100.000% <= 1744.895 milliseconds (cumulative count 10)

    Summary:
    throughput summary: 5.73 requests per second
    latency summary (msec):
    avg min p50 p95 p99 max
    1067.296 243.584 987.135 1744.895 1744.895 1744.895

The platform only has 20 articles and 8 Redis nodes (4 masters + 4 replicas), so search relevance isn't meaningful at this scale, and the deployment doesn't need a lot of memory.

    AI.INFO

    Now let's check how long our RedisAI model runs on the {6fd} shard:

    127.0.0.1:30012> AI.INFO bert-qa{6fd}
    1) "key"
    2) "bert-qa{6fd}"
    3) "type"
    4) "MODEL"
    5) "backend"
    6) "TORCH"
    7) "device"
    8) "CPU"
    9) "tag"
    10) ""
    11) "duration"
    12) (integer) 8928136
    13) "samples"
    14) (integer) 58
    15) "calls"
    16) (integer) 58
    17) "errors"
    18) (integer) 0

    bert-qa{6fd} is the key of the actual (very large) model saved. The AI.INFO command gives us a cumulative duration of 8928136 microseconds and 58 calls, which is approximately 153 milliseconds per call.
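A quick arithmetic check of that figure:

duration_us = 8928136  # cumulative "duration" reported by AI.INFO, in microseconds
calls = 58
print(duration_us / calls / 1000)  # ~153.9 milliseconds per call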

Let's double-check to make sure that's right by resetting the stats and then re-running the benchmark.

    First, reset the stats:

    127.0.0.1:30012> AI.INFO bert-qa{6fd} RESETSTAT
    OK
    127.0.0.1:30012> AI.INFO bert-qa{6fd}
    1) "key"
    2) "bert-qa{6fd}"
    3) "type"
    4) "MODEL"
    5) "backend"
    6) "TORCH"
    7) "device"
    8) "CPU"
    9) "tag"
    10) ""
    11) "duration"
    12) (integer) 0
    13) "samples"
    14) (integer) 0
    15) "calls"
    16) (integer) 0
    17) "errors"
    18) (integer) 0

    Then, re-run the benchmark:

    redis-benchmark -p 30012 -h 127.0.0.1 -n 10 get "bertqa{6fd}_PMC169038.xml:{6fd}:33_Who performs viral transmission among adults"
    ====== get bertqa{6fd}_PMC169038.xml:{6fd}:33_Who performs viral transmission among adults ======
    10 requests completed in 1.78 seconds
    50 parallel clients
    99 bytes payload
    keep alive: 1
    host configuration "save":
    host configuration "appendonly": no
    multi-thread: no

    Latency by percentile distribution:
    0.000% <= 188.927 milliseconds (cumulative count 1)
    50.000% <= 995.839 milliseconds (cumulative count 5)
    75.000% <= 1606.655 milliseconds (cumulative count 8)
    87.500% <= 1692.671 milliseconds (cumulative count 9)
    93.750% <= 1779.711 milliseconds (cumulative count 10)
    100.000% <= 1779.711 milliseconds (cumulative count 10)

    Cumulative distribution of latencies:
    0.000% <= 0.103 milliseconds (cumulative count 0)
    10.000% <= 189.183 milliseconds (cumulative count 1)
    20.000% <= 392.191 milliseconds (cumulative count 2)
    30.000% <= 540.159 milliseconds (cumulative count 3)
    40.000% <= 896.511 milliseconds (cumulative count 4)
    50.000% <= 996.351 milliseconds (cumulative count 5)
    60.000% <= 1260.543 milliseconds (cumulative count 6)
    70.000% <= 1456.127 milliseconds (cumulative count 7)
    80.000% <= 1606.655 milliseconds (cumulative count 8)
    90.000% <= 1692.671 milliseconds (cumulative count 9)
    100.000% <= 1779.711 milliseconds (cumulative count 10)

    Summary:
    throughput summary: 5.62 requests per second
    latency summary (msec):
    avg min p50 p95 p99 max
    1080.454 188.800 995.839 1779.711 1779.711 1779.711

    Now check the stats again:

    AI.INFO bert-qa{6fd}
    1) "key"
    2) "bert-qa{6fd}"
    3) "type"
    4) "MODEL"
    5) "backend"
    6) "TORCH"
    7) "device"
    8) "CPU"
    9) "tag"
    10) ""
    11) "duration"
    12) (integer) 1767749
    13) "samples"
    14) (integer) 20
    15) "calls"
    16) (integer) 20
    17) "errors"
    18) (integer) 0

Now we get 88387.45 microseconds (roughly 88 milliseconds) per call, which is pretty fast! Also, considering we started with 10 seconds per call, I think the benefits of using RedisAI in combination with RedisGears are pretty obvious. However, the trade-off is high memory usage.

    There are many ways to optimize this deployment. For example, you can add a FP16 quantization and ONNX runtime. If you would like to try that, this script will be a good starting point.

    Using Grafana to monitor RedisGears throughput, CPU, and Memory usage

Thanks to the contribution of Mikhail Volkov, we can now observe RedisGears and RedisGraph throughput and memory consumption using Grafana. When you cloned the repository, it also started a Grafana Docker container with pre-built dashboards for monitoring the Redis Cluster (including RedisGears and RedisAI) and the Graph node, which is Redis with RedisGraph. "The Pattern" dashboard provides an overview, with all the key benchmark metrics you care about:

    Grafana for RedisGraph

    Grafana for RedisCluster


    Probabilistic data structures using Redis Stack


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    Redis Stack extends Redis core to support additional probabilistic data structures. It allows for solving computer science problems in a constant memory space with extremely fast processing and a low error rate. It supports scalable Bloom and Cuckoo filters to determine (with a specified degree of certainty) whether an item is present or absent from a collection.

    Redis Stack provides four probabilistic data types:

    • Bloom filter: A probabilistic data structure that can test for presence. A Bloom filter is a data structure designed to tell you, rapidly and memory-efficiently, whether an element is present in a set. Bloom filters typically exhibit better performance and scalability when inserting items (so if you're often adding items to your dataset then Bloom may be ideal).
• Cuckoo filter: An alternative to Bloom filters, Cuckoo filters come with additional support for deleting elements from a set. These filters are quicker on check operations.
    • Count-min sketch: A count-min sketch is generally used to determine the frequency of events in a stream. You can query the count-min sketch to get an estimate of the frequency of any given event.
• Top-K: The Top-K probabilistic data structure in Redis Stack is a deterministic algorithm that approximates frequencies for the top k items. With Top-K, you’ll be notified in real time whenever elements enter into or are expelled from your Top-K list. When an added element enters the list, the element it displaces (if any) will be returned.

    In this tutorial, you will see how Redis Stack provides Redis with support for low latency and compact probabilistic data structures.

    Step 1. Create a free Cloud account

    Create your free Redis Enterprise Cloud account. Once you click on “Get Started”, you will receive an email with a link to activate your account and complete your signup process.

    tip

    For a limited time, use TIGER200 to get $200 credits on Redis Enterprise Cloud and try all the advanced capabilities!

    🎉 Click here to sign up

    Step 2. Create Your database

    Choose your preferred cloud vendor. Select the region and then click "Let's start free" to create your free database automatically.

    tip

If you want to create a custom database with your preferred name and type of Redis, click the "Create a custom database" option shown in the image.

    create database

    Step 3. Verify the database details

You will be provided with the public endpoint URL and "Redis Stack" as the type of database, along with the list of modules that come by default.

    verify database

    Step 4. Using RedisInsight

RedisInsight is a visual tool that lets you do both GUI- and CLI-based interactions with your Redis database, and so much more when developing your Redis-based application. It is a fully-featured pure desktop GUI client that provides capabilities to design, develop, and optimize your Redis application. It works with any cloud provider as long as you run it on a host with network access to your cloud-based Redis server. It makes it easy to discover cloud databases and configure connection details with a single click, and it allows you to automatically add Redis Enterprise Software and Redis Enterprise Cloud databases.

Follow this link to install RedisInsight v2 on your local system. Assuming that you already have RedisInsight v2 installed on macOS, you can browse through Applications and click "RedisInsight-v2" to bring up the Redis desktop GUI tool.

    Step 5. Add Redis database

    access redisinsight

    Step 6. Enter Redis Enterprise Cloud details

    Add the Redis Enterprise cloud database endpoint, port and password.

    access redisinsight

    Step 7. Verify the database under RedisInsight dashboard

    database details

    Step 8. Getting Started with probabilistic data structures in Redis Stack

In the next steps you will use some basic Redis Stack commands. You can run them from the Redis command-line interface (redis-cli) or use the CLI available in RedisInsight. (See part 2 of this tutorial to learn more about using the RedisInsight CLI.) To interact with probabilistic data structures in Redis, you use the BF.ADD and BF.EXISTS commands.

    Let’s go ahead and test drive some probabilistic data structure-specific operations. We will create a basic dataset based on unique visitors’ IP addresses, and you will see how to:

    • Create a Bloom filter
    • Determine whether or not an item exists in the Bloom filter
    • Add one or more items to the Bloom filter
    • Determine whether or not a unique visitor’s IP address exists

    Let’s walk through the process step-by-step:

    Create a Bloom filter

    Use the BF.ADD command to add a unique visitor IP address to the Bloom filter as shown here:

    >> BF.ADD unique_visitors 10.94.214.120
    (integer) 1
    (1.75s)

    Determine whether or not an item exists

    Use the BF.EXISTS command to determine whether or not an item may exist in the Bloom filter:

    >> BF.EXISTS unique_visitors 10.94.214.120
    (integer) 1
    >> BF.EXISTS unique_visitors 10.94.214.121
    (integer) 0
    (1.46s)

In the above example, the first command shows the result as “1”, indicating that the item may exist, whereas the second command displays "0", indicating that the item certainly does not exist.

    Add one or more items to the Bloom filter

    Use the BF.MADD command to add one or more items to the Bloom filter, creating the filter if it does not yet exist. This command operates identically to BF.ADD, except it allows multiple inputs and returns multiple values:

    >> BF.MADD unique_visitors 10.94.214.100 10.94.214.200 10.94.214.210 10.94.214.212
    1) (integer) 1
    2) (integer) 1
    3) (integer) 1
    4) (integer) 1

    As shown above, the BF.MADD allows you to add one or more visitors’ IP addresses to the Bloom filter.

    Determine whether or not a unique visitor’s IP address exists

    Use BF.MEXISTS to determine if one or more items may exist in the filter or not:

    >> BF.MEXISTS unique_visitors 10.94.214.200 10.94.214.212
    1) (integer) 1
    2) (integer) 1
     >> BF.MEXISTS unique_visitors 10.94.214.200 10.94.214.213
    1) (integer) 1
    2) (integer) 0

    In the above example, the first command shows the result as “1” for both the visitors’ IP addresses, indicating that both of them may exist. The second command displays "0" for one of the visitor’s IP addresses, indicating that the item certainly does not exist.

    Next Steps


    Using Probabilistic Data Structures with .NET

    Using probabilistic data structures in Redis Stack allows you to efficiently keep track of presence, heavy hitters, and counts on large streams of data. To use probabilistic data structures in .NET, you should use the StackExchange.Redis library. To get started with that package, follow our getting started guide. Once you have a reference to an IDatabase object, you will need to use the db.Execute and db.ExecuteAsync methods to run the custom commands you want.

    Bloom Filters

Bloom Filters are a powerful data structure that can tell you whether an item is in a set, think a username on a sign-up form. They're incredibly compact, requiring only 10-20 bits per item you want to add, and they are extremely quick both to add items and to determine whether an item is in the set.

    Create a Filter

    You don't need to create a Bloom Filter explicitly as any call of BF.ADD to a non-existent key will automatically create a Bloom Filter for you. However, if you want to tell Redis ahead of time how much data the Bloom Filter can expect and the error rate that you want for that data (the number of false positives it will report), You can use the BF.RESERVE command:

    await db.ExecuteAsync("BF.RESERVE", "bf:username", .01, 10000);

    The above command will reserve a Bloom Filter on the key bf:username that expects 10000 records and will have an error rate of 1%.

    Adding to a Filter

    To add to a Bloom Filter, all you need is to use the BF.ADD command:

    await db.ExecuteAsync("BF.ADD", "bf:username", "Kermit");

    The preceding code will add the username Kermit to the bf:username filter.

    Check if an Item is in a Filter

    To check if an item has been added to a Bloom Filter yet, you will use the BF.EXISTS command:

var exists = (int) await db.ExecuteAsync("BF.EXISTS", "bf:username", "Kermit") == 1;

    After running that command, if the Bloom Filter reports that it contains the item, exists will be true; otherwise, exists will be false.

    Count-Min Sketch

You can use Count-Min Sketches to count the number of times an item has been added to a set quickly and compactly, although, like other probabilistic data structures, they have some margin of error. In this case, the sketch can overcount the number of occurrences; the dimensions of the sketch determine the likelihood of this.

    Creating a Count-Min Sketch

    There are two ways to create a Count-Min Sketch, by probability and by dimension. Creating a Count-Min Sketch by probability will automatically generate a Count-Min Sketch based on the amount of overestimation you want to allow and the likelihood of overestimating a given element. If you want to initialize by dimensions, a Count-Min Sketch will initialize with the provided width and depth.

    await db.ExecuteAsync("CMS.INITBYPROB", "cms:views", .1, .01);

    This code will initialize a Count-Min Sketch. The sketch will have an acceptable overcount of 10% and a probability of overcounting of 1%.

    Adding Items to a Count-Min Sketch

    To add an item to a Count-Min Sketch, you call the CMS.INCRBY command, passing in the quantity of the given item you want to add to the sketch.

    await db.ExecuteAsync("CMS.INCRBY", "cms:views", "Gangnam Style", 1);
    await db.ExecuteAsync("CMS.INCRBY", "cms:views", "Baby Shark", 1);
    await db.ExecuteAsync("CMS.INCRBY", "cms:views", "Gangnam Style", 2);

    The above will add three views of Gangnam Style to the sketch and one view of Baby Shark.

    Querying the Sketch

    To query the number of occurrences of an element in the sketch, you need to use the CMS.QUERY command:

    var numViewsGangnamStyle = (long)await db.ExecuteAsync("CMS.QUERY", "cms:views", "Gangnam Style");
    var numViewsBabyShark = (long)await db.ExecuteAsync("CMS.QUERY", "cms:views", "Baby Shark");
    Console.WriteLine($"Gangnam Style Views: {numViewsGangnamStyle}");
    Console.WriteLine($"Baby Shark Views: {numViewsBabyShark}");

    Cuckoo Filters

    Cuckoo Filters solve a similar problem to Bloom Filters; they allow you to determine if an item has been added to a set yet. However, Cuckoo Filters have slightly different characteristics than Bloom Filters. For example, you may add the same item to a Cuckoo Filter more than once, and they do support delete operations (which introduces the possibility of false negatives in addition to false positives).

    Creating a Cuckoo Filter

    Similar to a Bloom Filter, a Cuckoo Filter is automatically created by adding an item to a Cuckoo Filter that does not exist. However, you may want to reserve a Cuckoo Filter ahead of time explicitly, so it knows precisely how many items you expect and how to expand. To do this, just run the CF.RESERVE command:

    await db.ExecuteAsync("CF.RESERVE", "cf:emails", 10000);

    Adding to a Cuckoo Filter

    To add an item to a Cuckoo Filter, use the CF.ADD command:

    await db.ExecuteAsync("CF.ADD", "cf:emails", "foo@bar.com");
    await db.ExecuteAsync("CF.ADD", "cf:emails", "James.Bond@mi6.com");

    The above will add foo@bar.com and James.Bond@mi6.com to the Cuckoo Filter.

    Checking Item Presence in a Cuckoo Filter

    To check if an item has been added to a Cuckoo Filter yet, use the CF.EXISTS command:

    var jamesEmailExists = (int) await db.ExecuteAsync("CF.EXISTS", "cf:emails", "James.Bond@mi6.com") == 1;
    var str = jamesEmailExists
    ? "James.Bond@mi6.com has already been added"
    : "James.Bond@mi6.com has not been added";
    Console.WriteLine(str);

    Top-K

The Top-K data structure allows you to keep a compact leaderboard of heavy hitters. This data structure can be extremely useful when keeping track of the most popular items in an enormous stream of data, as it means you don't have to keep counts for every record you see.

    Initializing a Top-K

    To initialize a Top-K, use the TOPK.RESERVE command. This command will reserve a Top-K that will keep track of the highest k items:

    await db.ExecuteAsync("TOPK.RESERVE", "topk:views", 5);

    The above, for example, will keep track of the five most viewed videos sent to the Top-K.

    Add Items to the Top-K

Adding items to a Top-K requires the TOPK.ADD command. This command can take however many items you want to insert, so if you get a batch of items to send at once, it may make sense to send them all across at the same time. For example, let's say we wanted to send 10,000 updates to the Top-K at the same time from a random set of videos:

    var videos = new[] {"Gangnam Style", "Baby Shark", "Despacito", "Uptown Funk", "See You Again", "Hello", "Roar", "Sorry"};
    var rand = new Random();
    var args = new List<string>(10001){"topk:views"};
    for (var i = 0; i < 10000; i++)
    {
    args.Add(videos[rand.Next(videos.Length)]);
    }

    await db.ExecuteAsync("TOPK.ADD", args.ToArray());

    This code will send them all across in one shot. You can, of course, chunk the items and send them in batches as well. Regardless, this will add items to your Top-K.

    List the Top K Items

    To list the items in your Top-K, you need to query the Top-K using the TOPK.LIST command:

    var topK = (RedisResult[]) await db.ExecuteAsync("TOPK.LIST", "topk:views");
    foreach (var item in topK)
    {
    Console.WriteLine(item);
    }

    This code will get all the items back for you and print them out.

    Query if an Item is in the Top-K

    To see if a given item is present in the Top-K, you would use TOPK.QUERY, passing in the item you want to check membership of:

    var BabySharkInTopK = (int) await db.ExecuteAsync("TOPK.QUERY", "topk:views", "Baby Shark") == 1;
    Console.WriteLine(BabySharkInTopK ? "Baby Shark is in the Top 5" : "Baby Shark is Not in the Top 5" );

    The above code will check if Baby Shark is in the Top 5 for video views from our above example.

    Resources

    • The Code for this Demo can be found in GitHub
Full-text search using Redis Stack
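For reference, an FT.CREATE command matching the elements described below would look something like this (a reconstruction based on the bullet descriptions and the title, release_year, rating, and genre fields; the exact command appears earlier in the full tutorial):

> FT.CREATE idx:movies ON hash PREFIX 1 "movies:" SCHEMA title TEXT SORTABLE release_year NUMERIC SORTABLE rating NUMERIC SORTABLE genre TAG SORTABLE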

    Before running queries on our new index, though, let’s take a closer look at the elements of the FT.CREATE command:

    • idx:movies: the name of the index, which you will use when doing queries
    • ON hash: the type of structure to be indexed. (Note that Redis Search 2.0 supports only the Hash structure, but this parameter will allow Redis to index other structures in the future.)
    • PREFIX 1 “movies:”: the prefix of the keys that should be indexed. This is a list, so since we want to index only movies:* keys the number is 1. If you want to index movies and TV shows with the same fields, you could use: PREFIX 2 “movies:” “tv_show:”
    • SCHEMA …: defines the schema, the fields, and their type to index. As you can see in the command, we are using TEXT, NUMERIC, and TAG, as well as SORTABLE parameters.

The Redis Search engine will scan the database using the PREFIX values and update the index based on the schema definition. This makes it easy to add an index to an existing application that uses Hashes; there's no need to change your code.

Search the movies in Redis

    You can now use the FT.SEARCH to search your database, for example, to search all movies sorted by release year:

    >  FT.SEARCH idx:movies * SORTBY release_year ASC RETURN 2 title release_year
    1) (integer) 2
    2) "movies:1003"
    3) 1) "release_year"
    2) "1972"
    3) "title"
    4) "The Godfather"
    4) "movies:1002"
    5) 1) "release_year"
    2) "1980"
    3) "title"
    4) "Star Wars: Episode V - The Empire Strikes Back"

    You can also search “action” movies that contain “star” in the index (in our sample index, the term “star” will occur only in the title):

    >  FT.SEARCH idx:movies "star @genre:{action}" RETURN 2 title release_year
    1) (integer) 1
    2) "movies:1002"
    3) 1) "title"
    2) "Star Wars: Episode V - The Empire Strikes Back"
    3) "release_year"
    4) "1980"

    The FT.SEARCH command is the base command to search your database, it has many options and is associated with a powerful and rich query syntax that you can find in the documentation. (Note: You can also use the index to do data aggregation using the FT.AGGREGATE command.)

    Next Steps


    RedisGears Tutorial


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    RedisGears is an engine for data processing in Redis. RedisGears supports batch and event-driven processing for Redis data. To use RedisGears, you write functions that describe how your data should be processed. You then submit this code to your Redis deployment for remote execution.

    RedisGears is implemented by a Redis module. To use RedisGears, you’ll need to make sure that your Redis deployment has the module installed.

    Step 1. Installing RedisGears

Before you can use RedisGears, you have to install the RedisGears module. We will be using the redislabs/redismod Docker image for this demonstration.

     docker run -d -p 6379:6379 redislabs/redismod

    Step 2. Verifying if RedisGears module is enabled:

    You can directly use redis-cli CLI to verify if RedisGears module("rg") is properly loaded or not.

redis-cli
    127.0.0.1:6379> info modules
    # Modules
    ..
    module:name=rg,ver=10006,api=1,filters=0,usedby=[],using=[ai],options=[]

    Step 3. Create a "wordcount" Python script

    To demonstrate RedisGears functionality, we will be performing a unique word count on the existing strings. We will be writing a RedisGears function to do this.

    Open a file called wordcount.py, and add the following code:

     gb = GearsBuilder()
    gb.map(lambda x: x['value']) # map each key object to its string value
    gb.flatmap(lambda x: x.split()) # split each string into a list of words
    gb.countby() # run a count-unique on these words
    gb.run()

    Step 4. Execute the CLI

     redis-cli rg.pyexecute "`cat wordcount.py`"
    1) 1) "{'key': 'world', 'value': 1}"
    2) "{'key': 'galaxy', 'value': 1}"
    3) "{'key': 'hello', 'value': 3}"
    4) "{'key': 'universe', 'value': 1}"
    2) (empty array)

The results here show the number of occurrences of each word across all of our strings. So, we’ve effectively processed the data in our Redis database all at once, in a batch.

    References


    How to build RedisGraph databases from CSV inputs in Easy Steps

    End-of-Life Notice

    Redis is phasing out RedisGraph. This blog post explains the motivation behind this decision and the implications for existing Redis customers and community members.

    End of support is scheduled for January 31, 2025.

    Beginning with Redis Stack 7.2.x-y, Redis Stack will no longer include graph capabilities (RedisGraph).


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    RedisGraph is the fastest graph database that processes complex graph operations in real time, 10x – 600x faster than any other graph database. It shows how your data is connected through multiple visualization integrations including RedisInsight, Linkurious, and Graphileon. It allows you to query graphs using the industry-standard Cypher query language and you can easily use graph capabilities from application code.

    My Image

    RedisGraph Bulk Loader

If you have a bunch of CSV files that you want to load into a RedisGraph database, you should try the Bulk Loader utility. Aptly named RedisGraph Bulk Loader, this tool is written in Python and helps you build RedisGraph databases from CSV inputs. It requires a Python 3 interpreter.

    Follow the steps below to load CSV data into RedisGraph database:

    Step 1. Run Redis Stack Docker container

docker run -p 6379:6379 --name redis-stack redis/redis-stack

    Step 2. Verify if RedisGraph module is loaded

     info modules
    # Modules
    module:name=graph,ver=20405,api=1,filters=0,usedby=[],using=[],options=[]

    Step 3. Clone the Bulk Loader Utility

     $ git clone https://github.com/RedisGraph/redisgraph-bulk-loader

    Step 4. Installing the RedisGraph Bulk Loader tool

    The bulk loader can be installed using pip:

      pip3 install redisgraph-bulk-loader

    Or

     pip3 install git+https://github.com/RedisGraph/redisgraph-bulk-loader.git@master

    Step 5. Create a Python virtual env for this work

     python3 -m venv redisgraphloader

    Step 6. Step into the venv:

     source redisgraphloader/bin/activate

    Step 7. Install the dependencies for the bulk loader:

     pip3 install -r requirements.txt

    If the above command doesn’t work, install the below modules:

     pip3 install pathos
    pip3 install redis
    pip3 install click

Step 8. Run the Groovy script to generate the CSV files

     groovy generateCommerceGraphCSVForImport.groovy

    Step 9. Verify the .csv files created

     head -n2 *.csv
    ==> addtocart.csv <==
    src_person,dst_product,timestamp
    0,1156,2010-07-20T16:11:20.551748

    ==> contain.csv <==
    src_person,dst_order
    2000,1215

    ==> order.csv <==
    _internalid,id,subTotal,tax,shipping,total
    2000,0,904.71,86.40,81.90,1073.01

    ==> person.csv <==
    _internalid,id,name,address,age,memberSince
    0,0,Cherlyn Corkery,146 Kuphal Isle South Jarvis MS 74838-0662,16,2010-03-18T16:25:20.551748

    ==> product.csv <==
    _internalid,id,name,manufacturer,msrp
    1000,0,Sleek Plastic Car,Thiel Hills and Leannon,385.62

    ==> transact.csv <==
    src_person,dst_order
    2,2000

    ==> view.csv <==
    src_person,dst_product,timestamp
    0,1152,2012-04-14T11:23:20.551748

    Step 10. Run the Bulk loader script

      python3 bulk_insert.py prodrec-bulk -n person.csv -n product.csv -n order.csv -r view.csv -r addtocart.csv -r transact.csv -r contain.csv
    person [####################################] 100%
    1000 nodes created with label 'person'
    product [####################################] 100%
    1000 nodes created with label 'product'
    order [####################################] 100%
    811 nodes created with label 'order'
    view [####################################] 100%
    24370 relations created for type 'view'
    addtocart [####################################] 100%
    6458 relations created for type 'addtocart'
    transact [####################################] 100%
    811 relations created for type 'transact'
    contain [####################################] 100%
    1047 relations created for type 'contain'
    Construction of graph 'prodrec-bulk' complete: 2811 nodes created, 32686 relations created in 1.021761 seconds
graph.query prodrec-bulk "match (p:person) where p.id=200 return p.name"
    1) 1) "p.name"
    2) (empty array)
    3) 1) "Cached execution: 0"
    2) "Query internal execution time: 0.518300 milliseconds"

Step 11. Install RedisInsight

    To use RedisInsight on a local Mac, you can download from the RedisInsight page on the RedisLabs website:

    Click this link to access a form that allows you to select the operating system of your choice.

    My Image

    If you have Docker Engine installed in your system, the quick way is to run the following command:

     docker run -d -v redisinsight:/db -p 8001:8001 redislabs/redisinsight:latest

Step 12. Accessing RedisInsight

    Next, point your browser to http://localhost:8001.

Step 13. Run the Graph Query

     GRAPH.QUERY "prodrec-bulk" "match (p:person) where p.id=199 return p"

    My Image

    References

    Redis Launchpad

    Explore Python Codebase using RedisGraph

    End-of-Life Notice

    Redis is phasing out RedisGraph. This blog post explains the motivation behind this decision and the implications for existing Redis customers and community members.

    End of support is scheduled for January 31, 2025.

    Beginning with Redis Stack 7.2.x-y, Redis Stack will no longer include graph capabilities (RedisGraph).


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

Pycograph is an open source tool that creates a RedisGraph model of your Python code. The tool lets you explore your Python codebase with graph queries: with Pycograph, you can query the Python code with Cypher. Additionally, it is possible to visualize the graph model using RedisInsight.

The project is hosted at https://pycograph.com/ and the package is available in the PyPI repository. It was first introduced by Reka Horvath during RedisConf 2021.

    Let us see how to explore Python code using Pycograph and RedisGraph below:

    Step 1. Install Docker

     curl -sSL https://get.docker.com/ | sh

    Step 2. Install Pycograph from PyPI

     pip install pycograph

    Step 3. Start RedisGraph Module

The redis/redis-stack Docker image provides you with all the essential Redis modules.

     docker run -d -p 6379:6379 redis/redis-stack

    Step 4. Run RedisInsight

     docker run -d -v redisinsight:/db -p 8001:8001 redislabs/redisinsight:latest

    Step 5. Load a sample Python code

We will be using a popular Docker Compose project for our sample Python code. Clone the Docker Compose repository:

      git clone https://github.com/docker/compose

    Step 6. Load Python Code

    Load your project's code with the pycograph load command:

     pycograph load --project-dir compose
    Results:
     Graph successfully updated.
    {'graph name': 'compose', 'nodes added': 2428, 'edges added': 11239}

    Step 7. Visualize the project

    Open RedisInsight, select RedisGraph on the left menu and run the below query:

    Query #1: Return every node

     MATCH (n) RETURN n

    You will see the below output:

    My Image

    Query #2: Return every non-test object

    My Image
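The deployed page shows the query for this step only as a screenshot. One plausible form, assuming pycograph marks test entities with an is_test property (an assumption; check the pycograph documentation for the exact property name), is:

 MATCH (n) WHERE NOT n.is_test RETURN n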

    Query #3. Displaying the function behind the docker-compose up command

    A query returning exactly one node using the unique full_name property. Double-click on the node to display all its relationships.

    My Image
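A hedged sketch of the query described above (the exact full_name value depends on the revision of the Compose codebase you cloned and is an assumption here):

 MATCH (f:function {full_name: "compose.cli.main.up"}) RETURN f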

    Query #4. Displaying the 'docker-compose up' and its calls relationships

    My Image

    Query #5. Displaying the 'docker-compose up' vs 'docker-compose run'

    Functions called by the Docker Compose top level commands up and run

    My Image

    References:

    Redis Launchpad
    - + \ No newline at end of file diff --git a/howtos/redisgraph/getting-started/index.html b/howtos/redisgraph/getting-started/index.html index 804fd2ec7d..9be94a58b5 100644 --- a/howtos/redisgraph/getting-started/index.html +++ b/howtos/redisgraph/getting-started/index.html @@ -4,7 +4,7 @@ Graph database using Redis Stack | The Home of Redis Developers - + @@ -14,7 +14,7 @@

    Graph database using Redis Stack

    End-of-Life Notice

    Redis is phasing out RedisGraph. This blog post explains the motivation behind this decision and the implications for existing Redis customers and community members.

    End of support is scheduled for January 31, 2025.

    Beginning with Redis Stack 7.2.x-y, Redis Stack will no longer include graph capabilities (RedisGraph).


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

RedisGraph is a Redis module that enables enterprises to process any kind of connected data much faster than with traditional relational or existing graph databases. RedisGraph implements a unique data storage and processing solution (with sparse-adjacency matrices and GraphBLAS) to deliver the fastest and most efficient way to store, manage, and process connected data in graphs. With RedisGraph, you can process complex transactions 10 - 600 times faster than with traditional graph solutions while using 50 - 60% less memory than other graph databases!

    Step 1. Create a free Cloud account

    Create your free Redis Enterprise Cloud account. Once you click on “Get Started”, you will receive an email with a link to activate your account and complete your signup process.

    tip

    For a limited time, use TIGER200 to get $200 credits on Redis Enterprise Cloud and try all the advanced capabilities!

    🎉 Click here to sign up

    Step 2. Create Your database

    Choose your preferred cloud vendor. Select the region and then click "Let's start free" to create your free database automatically.

    tip

If you want to create a custom database with your preferred name and type of Redis, click the "Create a custom database" option shown in the image.

    create database

    Step 3. Verify the database details

You will be provided with the public endpoint URL, "Redis Stack" as the database type, and the list of modules that come loaded by default.

    verify database

    Step 4. Install RedisInsight

RedisInsight is a visual tool that lets you do both GUI- and CLI-based interactions with your Redis database, and so much more when developing your Redis-based application. It is a fully-featured pure desktop GUI client that provides capabilities to design, develop, and optimize your Redis application. It works with any cloud provider as long as you run it on a host with network access to your cloud-based Redis server. It makes it easy to discover cloud databases and configure connection details with a single click, and it allows you to automatically add Redis Enterprise Software and Redis Enterprise Cloud databases.

You can install Redis Stack on your local system to get the RedisInsight GUI tool up and running. Ensure that you have the brew package manager installed on your Mac:

     brew tap redis-stack/redis-stack
    brew install --cask redis-stack
      ==> Installing Cask redis-stack-redisinsight
    ==> Moving App 'RedisInsight-preview.app' to '/Applications/RedisInsight-preview.app'
    🍺 redis-stack-redisinsight was successfully installed!
    ==> Installing Cask redis-stack
    🍺 redis-stack was successfully installed!

    Go to Applications and click "RedisInsight-v2" to bring up the Redis Desktop GUI tool.

    Step 5. Add Redis database

    access redisinsight

    Step 6. Enter Redis Enterprise Cloud details

    Add the Redis Enterprise cloud database endpoint, port and password.

    access redisinsight

    Step 7. Verify the database under RedisInsight dashboard

    database details

    Step 8. Getting Started with RedisGraph

In the following steps, we will use some basic RedisGraph commands to insert data into a graph and then query the graph. You can run them from the Redis command-line interface (redis-cli) or use the CLI available in RedisInsight. (See part 2 of this tutorial to learn more about using the RedisInsight CLI.)

    RedisGraph

    Step 9: Insert data into a graph

    Insert actors

To interact with RedisGraph you will typically use the GRAPH.QUERY command and execute Cypher queries. Let's start by inserting some actors into the graph:movies graph, which is automatically created by this command:

    >> GRAPH.QUERY graph:movies "CREATE (:Actor {name:'Mark Hamill', actor_id:1}), (:Actor {name:'Harrison Ford', actor_id:2}), (:Actor {name:'Carrie Fisher', actor_id:3})"

    1) 1) "Labels added: 1"
    2) "Nodes created: 3"
    3) "Properties set: 6"
    4) "Query internal execution time: 0.675400 milliseconds"

    This single query creates three actors, along with their names and unique IDs.

    Insert a movie

    > GRAPH.QUERY graph:movies "CREATE (:Movie {title:'Star Wars: Episode V - The Empire Strikes Back', release_year: 1980 , movie_id:1})"
    1) 1) "Labels added: 1"
    2) "Nodes created: 1"
    3) "Properties set: 3"
    4) "Query internal execution time: 0.392300 milliseconds"

    This single query creates a movie with a title, the release year, and an ID.

    Associate actors and movies

    The core of a graph is the relationships between the nodes, allowing the applications to navigate and query them. Let’s create a relationship between the actors and the movies:

    > GRAPH.QUERY graph:movies "MATCH (a:Actor),(m:Movie) WHERE a.actor_id = 1 AND m.movie_id = 1 CREATE (a)-[r:Acted_in {role:'Luke Skywalker'}]->(m) RETURN r"
    1) 1) "r"
    2) 1) 1) 1) 1) "id"
    2) (integer) 1
    2) 1) "type"
    2) "Acted_in"
    3) 1) "src_node"
    2) (integer) 0
    4) 1) "dest_node"
    2) (integer) 3
    5) 1) "properties"
    2) 1) 1) "role"
    2) "Luke Skywalker"
    3) 1) "Properties set: 1"
    2) "Relationships created: 1"
    3) "Query internal execution time: 0.664800 milliseconds"

    This command created a new relation indicating that the actor Mark Hamill acted in Star Wars: Episode V as Luke Skywalker.

    Let’s repeat this process for the other actors:

    > GRAPH.QUERY graph:movies "MATCH (a:Actor), (m:Movie) WHERE a.actor_id = 2 AND m.movie_id = 1 CREATE (a)-[r:Acted_in {role:'Han Solo'}]->(m) RETURN r"
    > GRAPH.QUERY graph:movies "MATCH (a:Actor), (m:Movie) WHERE a.actor_id = 3 AND m.movie_id = 1 CREATE (a)-[r:Acted_in {role:'Princess Leila'}]->(m) RETURN r"

    You can also do all of this in a single query, for example:

    > GRAPH.QUERY graph:movies "CREATE (:Actor {name:'Marlo Brando', actor_id:4})-[:Acted_in {role:'Don Vito Corleone'}]->(:Movie {title:'The Godfather', release_year: 1972 , movie_id:2})"

    1) 1) "Nodes created: 2"
    2) "Properties set: 6"
    3) "Relationships created: 1"
    4) "Query internal execution time: 0.848500 milliseconds"

    Querying the graph

    Now that you have data in your graph, you’re ready to ask some questions, such as:

    “What are the titles of all the movies?”

    > GRAPH.QUERY graph:movies "MATCH (m:Movie) RETURN m.title"

    1) 1) "m.title"
    2) 1) 1) "Star Wars: Episode V - The Empire Strikes Back"
    2) 1) "The Godfather"
    3) 1) "Query internal execution time: 0.349400 milliseconds"

    “What is the information for the movie with the ID of 1?”

    > GRAPH.QUERY graph:movies "MATCH (m:Movie) WHERE m.movie_id = 1 RETURN m"

    1) 1) "m"
    2) 1) 1) 1) 1) "id"
    2) (integer) 3
    2) 1) "labels"
    2) 1) "Movie"
    3) 1) "properties"
    2) 1) 1) "title"
    2) "Star Wars: Episode V - The Empire Strikes Back"
    2) 1) "release_year"
    2) (integer) 1980
    3) 1) "movie_id"
    2) (integer) 1
    3) 1) "Query internal execution time: 0.365800 milliseconds"

    “Who are the actors in the movie 'Star Wars: Episode V - The Empire Strikes Back' and what roles did they play?”

    > GRAPH.QUERY graph:movies "MATCH (a:Actor)-[r:Acted_in]-(m:Movie) WHERE m.movie_id = 1 RETURN a.name,m.title,r.role"
    1) 1) "a.name"
    2) "m.title"
    3) "r.role"
    2) 1) 1) "Mark Hamill"
    2) "Star Wars: Episode V - The Empire Strikes Back"
    3) "Luke Skywalker"
    2) 1) "Harrison Ford"
    2) "Star Wars: Episode V - The Empire Strikes Back"
    3) "Han Solo"
    3) 1) "Carrie Fisher"
    2) "Star Wars: Episode V - The Empire Strikes Back"
    3) "Princess Leila"
    3) 1) "Query internal execution time: 0.641200 milliseconds"

    Visualizing graph databases using RedisInsight

    If you are using RedisInsight, you can visualize and navigate into the nodes and relationships graphically. Click on the RedisGraph menu entry on the left and enter the query:

    MATCH (m:Actor) return m

Click on the Execute button, and double-click on the actors to follow the relationships. You should see a graph like this one:

    RedisGraph

    Resources

    Next Steps

    Redis Launchpad
    - + \ No newline at end of file diff --git a/howtos/redisgraph/index.html b/howtos/redisgraph/index.html index 576b08b1d2..e57819e82a 100644 --- a/howtos/redisgraph/index.html +++ b/howtos/redisgraph/index.html @@ -4,7 +4,7 @@ RedisGraph Tutorial | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    RedisGraph Tutorial

    End-of-Life Notice

    Redis is phasing out RedisGraph. This blog post explains the motivation behind this decision and the implications for existing Redis customers and community members.

    End of support is scheduled for January 31, 2025.

    Beginning with Redis Stack 7.2.x-y, Redis Stack will no longer include graph capabilities (RedisGraph).

The following links provide you with the available options to get started with RedisGraph:
    Getting Started with RedisGraph
    Explore Python code using RedisGraph
    Building RedisGraph databases from CSV Inputs
    - + \ No newline at end of file diff --git a/howtos/redisgraph/redisgraph-cheatsheet/index.html b/howtos/redisgraph/redisgraph-cheatsheet/index.html index 9bea461be1..95468491f2 100644 --- a/howtos/redisgraph/redisgraph-cheatsheet/index.html +++ b/howtos/redisgraph/redisgraph-cheatsheet/index.html @@ -4,7 +4,7 @@ RedisGRAPH Cheatsheet | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    RedisGRAPH Cheatsheet

    End-of-Life Notice

    Redis is phasing out RedisGraph. This blog post explains the motivation behind this decision and the implications for existing Redis customers and community members.

    End of support is scheduled for January 31, 2025.

    Beginning with Redis Stack 7.2.x-y, Redis Stack will no longer include graph capabilities (RedisGraph).

Command purpose and syntax:

• Execute a query against a named graph:
  GRAPH.QUERY <graph name> "<query>"
• Execute a read-only query against a named graph:
  GRAPH.RO_QUERY <graph name> "<query>"
• Query structure: MATCH describes the relationship between queried entities, using ASCII art to represent the pattern(s) to match against:
  GRAPH.QUERY <graph name> "MATCH <pattern>"
• The OPTIONAL MATCH clause is a MATCH variant that produces null values for elements that do not match successfully, rather than the all-or-nothing logic for patterns in MATCH clauses:
  GRAPH.QUERY <graph name> "MATCH <pattern> OPTIONAL MATCH <pattern>"
• The WHERE clause is not mandatory, but if you want to filter results, you can specify your predicates here:
  GRAPH.QUERY <graph name> "MATCH <pattern> WHERE <pattern>"
• The RETURN clause defines the result set; this is how a tailored result set is produced:
  GRAPH.QUERY <graph name> "MATCH <pattern> WHERE <pattern> RETURN <pattern>"
• ORDER BY specifies that the output be sorted, and how:
  GRAPH.QUERY <graph name> "MATCH <pattern> WHERE <pattern> RETURN <pattern> ORDER BY <variables> [ASC|DESC]"
• The optional SKIP clause allows a specified number of records to be omitted from the result set:
  GRAPH.QUERY <graph name> "MATCH <pattern> WHERE <pattern> RETURN <pattern> ORDER BY <variables> [ASC|DESC] SKIP <n>" where <n> is an integer
• The LIMIT clause is not mandatory and can be used to limit the number of records returned by a RETURN set:
  GRAPH.QUERY <graph name> "MATCH <pattern> WHERE <pattern> RETURN <pattern> ORDER BY <variables> [ASC|DESC] LIMIT <n>" where <n> is an integer
• The CREATE clause is used to introduce new nodes and relationships:
  GRAPH.QUERY <graph name> "MATCH <pattern> CREATE <nodes>"
• The DELETE clause is used to remove both nodes and relationships:
  GRAPH.QUERY <graph name> "MATCH <pattern> DELETE <alias>"
• The SET clause is used to create or update properties on nodes and relationships:
  GRAPH.QUERY <graph name> "MATCH <pattern> SET <property value>"
• The MERGE clause ensures that a path exists in the graph:
  GRAPH.QUERY <graph name> "MERGE <pattern>"
• The WITH clause allows parts of queries to be independently executed and have their results handled uniquely:
  GRAPH.QUERY <graph name> "MATCH <pattern> WITH <expression> AS <alias>"
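To see how these clauses compose, here is a hypothetical query against a graph named friends (the graph name, label, and properties are illustrative assumptions, not part of the cheatsheet above):

 GRAPH.QUERY friends "MATCH (p:Person)-[:Knows]->(f:Person) WHERE p.name = 'Tom' RETURN f.name ORDER BY f.name ASC LIMIT 10"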
    - + \ No newline at end of file diff --git a/howtos/redisgraph/redisgraphmovies/index.html b/howtos/redisgraph/redisgraphmovies/index.html index 5dca6f947e..8bd36b3d14 100644 --- a/howtos/redisgraph/redisgraphmovies/index.html +++ b/howtos/redisgraph/redisgraphmovies/index.html @@ -4,7 +4,7 @@ Building Movies database app using React, NodeJS and Redis | The Home of Redis Developers - + @@ -13,7 +13,7 @@

    Building Movies database app using React, NodeJS and Redis

    End-of-Life Notice

    Redis is phasing out RedisGraph. This blog post explains the motivation behind this decision and the implications for existing Redis customers and community members.

    End of support is scheduled for January 31, 2025.

    Beginning with Redis Stack 7.2.x-y, Redis Stack will no longer include graph capabilities (RedisGraph).


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

IMDb (Internet Movie Database) is the world's most popular and authoritative source for information on movies, TV shows, and celebrities. This application is an IMDb clone with basic account authentication and movie recommendation functionality. You will learn the power of RedisGraph and NodeJS to build a simple movie database.

    moviedb

    Tech Stack

    • Frontend - React
    • Backend - Node.js, Redis, RedisGraph

    Step 1. Install the pre-requisites

    • Node - v13.14.0+
    • NPM - v7.6.0+

    Step 2. Run Redis Stack Docker container

     docker run -d -p 6379:6379 redis/redis-stack

Ensure that the Docker container is up and running:

     docker ps
    CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
    fd5ef30f025a redis/redis-stack "redis-server --load…" 2 hours ago Up 2 hours 0.0.0.0:6379->6379/tcp nervous_buck

    Step 3. Run RedisInsight Docker container

     docker run -d -v redisinsight:/db -p 8001:8001 redislabs/redisinsight:latest

Ensure that the Docker container is up and running:

     docker ps
    CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
    264db1706dcc redislabs/redisinsight:latest "bash ./docker-entry…" About an hour ago Up About an hour 0.0.0.0:8001->8001/tcp angry_shirley
    fd5ef30f025a redis/redis-stack "redis-server --load…" 2 hours ago Up 2 hours 0.0.0.0:6379->6379/tcp nervous_buck

    Step 4. Clone the repository

     git clone https://github.com/redis-developer/basic-redisgraph-movie-demo-app-nodejs

    Step 5. Setting up environment variables

    Copy .env.sample to .env and add the following details:

      REDIS_ENDPOINT_URL = "Redis server URI"
    REDIS_PASSWORD = "Password to the server"
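As a hedged example, values for the local Docker container from Step 2 might look like the following (the exact URI format the app expects is an assumption, so check the project's README; the local redis-stack container has no password by default):

  REDIS_ENDPOINT_URL = "redis://127.0.0.1:6379"
  REDIS_PASSWORD = ""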

    Step 6. Install the dependencies

     npm install

    Step 7. Run the backend server

     node app.js

    Step 8. Run the client

     cd client
    yarn install
    yarn start

    Step 9. Accessing the Movie app

    Open http://IP:3000 to access the movie app

    movieapp

    Step 10. Sign up for a new account

    moviedb

    Enter the details to create a new account:

    movieapp

    Step 11. Sign-in to movie app

    movieapp

    Step 12. Rate the movie

    movieapp

Step 13. View the list of rated movies

    movieapp

Step 14. View directed movies in RedisInsight

     GRAPH.QUERY "MovieApp" "MATCH (director:Director {tmdbId: \"4945\"})-[:DIRECTED]->(movie:Movie) RETURN DISTINCT movie,director"

    movieapp

Step 15. Find movies an actor acted in

Run the below query in RedisGraph to find the movies an actor acted in:

     GRAPH.QUERY "MovieApp" "MATCH (actor:Actor {tmdbId: \"8537\"})-[:ACTED_IN_MOVIE]->(movie:Movie) RETURN DISTINCT movie,actor"

    movieapp

    Step 16. Store a user in a database

     CREATE (user:User {id: 32,
    username: "user", password: "hashed_password", api_key: "525d40da10be8ec75480"})
    RETURN user

    movieapp

    Step 17. Find a user by username

     MATCH (user:User {username: "user"}) RETURN user

    movieapp

How it works

    The app consumes the data provided by the Express API and presents it through some views to the end user, including:

    • Home page
    • Sign-up and Login pages
    • Movie detail page
    • Actor and Director detail page
    • User detail page

    Home page

    How it works

    The home page shows the genres and a brief listing of movies associated with them.

    How the data is stored

    Add a new genre:

     create (g:Genre{name:"Adventure"})

    Add a movie:

     create (m:Movie {
    url: "https://themoviedb.org/movie/862",
    id:232,
    languages:["English"],
    title:"Toy Story",
    countries:["USA"],
    budget:30000000,
    duration:81,
    imdbId:"0114709",
    imdbRating:8.3,
    imdbVotes:591836,
    movieId:42,
    plot:"...",
    poster:"https://image.tmd...",
    poster_image:"https://image.tmdb.or...",
    released:"1995-11-22",
    revenue:373554033,
    runtime:$runtime,
    tagline:"A cowboy doll is profoundly t...",
    tmdbId:"8844",
    year:"1995"})

    Set genre to a movie:

     MATCH (g:Genre), (m:Movie)
    WHERE g.name = "Adventure" AND m.title = "Toy Story"
    CREATE (m)-[:IN_GENRE]->(g)

    How the data is accessed

    Get genres:

     MATCH (genre:Genre) RETURN genre

Get movies by genre:

     MATCH (movie:Movie)-[:IN_GENRE]->(genre)
    WHERE toLower(genre.name) = toLower("Film-Noir") OR id(genre) = toInteger("Film-Noir")
    RETURN movie

    Code example: Get movies with genre

 const getByGenre = function (session, genreId) {
  const query = [
    'MATCH (movie:Movie)-[:IN_GENRE]->(genre)',
    'WHERE toLower(genre.name) = toLower($genreId) OR id(genre) = toInteger($genreId)',
    'RETURN movie',
  ].join('\n');

  return session
    .query(query, {
      genreId,
    })
    .then((result) => manyMovies(result));
};

    Sign-up and Login pages

    moviedb moviedb

To be able to rate movies, a user needs to be logged in. For that, a basic JWT-based authentication system is implemented, with user details stored in RedisGraph for persistence.

    How the data is stored

    Store user in the database:

     CREATE (user:User {id: 32,
    username: "user", password: "hashed_password", api_key: "525d40da10be8ec75480"})
    RETURN user

    How the data is accessed

    Find by user name:

     MATCH (user:User {username: "user"}) RETURN user

    Code Example: Find user

 const me = function (session, apiKey) {
  return session
    .query('MATCH (user:User {api_key: $api_key}) RETURN user', {
      api_key: apiKey,
    })
    .then((foundedUser) => {
      if (!foundedUser.hasNext()) {
        throw {message: 'invalid authorization key', status: 401};
      }
      while (foundedUser.hasNext()) {
        const record = foundedUser.next();
        return new User(record.get('user'));
      }
    });
};

    Movie detail page

    How it works

On this page a user can rate the film and view the actors and directors who participated in its production.

    How the data is stored

    Associate actor with a movie:

     MATCH (m:Movie) WHERE m.title="Jumanji" CREATE (a:Actor :Person{
    bio:"Sample...",
    bornIn:"Denver, Colorado, USA",
    imdbId:"0000245",
    name:"Robin Williams",
    poster:"https://image.tmdb.org/t/p/w440_and_...",
    tmdbId:"2157",
    url:"https://themoviedb.org/person/2157"})-[r:ACTED_IN_MOVIE
    {role: "Alan Parrish"}]->(m)

    Associate director with a movie:

     MATCH (m:Movie) WHERE m.title="Dead Presidents" CREATE (d:Director :Person{
    bio: "From Wikipedia, the free e...",
    bornIn: "Detroit, Michigan, USA",
    imdbId: "0400436",
    name: "Albert Hughes",
    tmdbId: "11447",
    url: "https://themoviedb.org/person/11447"})-[r:DIRECTED]->(m)

    How the data is accessed

    Find movie by id with genre, actors and director:

     MATCH (movie:Movie {tmdbId: $movieId})
    OPTIONAL MATCH (movie)<-[my_rated:RATED]-(me:User {id: "e1e3991f-fe81-439e-a507-aa0647bc0b88"})
    OPTIONAL MATCH (movie)<-[r:ACTED_IN_MOVIE]-(a:Actor)
    OPTIONAL MATCH (movie)-[:IN_GENRE]->(genre:Genre)
    OPTIONAL MATCH (movie)<-[:DIRECTED]-(d:Director)
    WITH DISTINCT movie, my_rated, genre, d, a, r
    RETURN DISTINCT movie,
    collect(DISTINCT d) AS directors,
    collect(DISTINCT a) AS actors,
    collect(DISTINCT genre) AS genres

    Code Example: Get movie detail

 const getById = function (session, movieId, userId) {
  if (!userId) throw {message: 'invalid authorization key', status: 401};
  const query = [
    'MATCH (movie:Movie {tmdbId: $movieId})\n' +
      ' OPTIONAL MATCH (movie)<-[my_rated:RATED]-(me:User {id: $userId})\n' +
      ' OPTIONAL MATCH (movie)<-[r:ACTED_IN_MOVIE]-(a:Actor)\n' +
      ' OPTIONAL MATCH (movie)-[:IN_GENRE]->(genre:Genre)\n' +
      ' OPTIONAL MATCH (movie)<-[:DIRECTED]-(d:Director)\n' +
      ' WITH DISTINCT movie, my_rated, genre, d, a, r\n' +
      ' RETURN DISTINCT movie,\n' +
      ' collect(DISTINCT d) AS directors,\n' +
      ' collect(DISTINCT a) AS actors,\n' +
      ' collect(DISTINCT genre) AS genres',
  ].join(' ');
  return session
    .query(query, {
      movieId: movieId.toString(),
      userId: userId.toString(),
    })
    .then((result) => {
      if (result.hasNext()) {
        return _singleMovieWithDetails(result.next());
      }
      throw {message: 'movie not found', status: 404};
    });
};

    Actor and Director detail page

    How it works

    How the data is accessed

    Find movies where actor acted in:

     MATCH (actor:Actor {tmdbId: "8537"})-[:ACTED_IN_MOVIE]->(movie:Movie)
    RETURN DISTINCT movie,actor

    Find movies directed by:

     MATCH (director:Director {tmdbId: "4945"})-[:DIRECTED]->(movie:Movie)
    RETURN DISTINCT movie,director

    Get movies directed by

 const getByDirector = function (session, personId) {
  const query = [
    'MATCH (director:Director {tmdbId: $personId})-[:DIRECTED]->(movie:Movie)',
    'RETURN DISTINCT movie,director',
  ].join('\n');

  return session
    .query(query, {
      personId,
    })
    .then((result) => manyMovies(result));
};

    User detail page

    How it works

Shows the profile info and the movies rated by the user.

    How the data is stored

    Set rating for a movie:

     MATCH (u:User {id: 42}),(m:Movie {tmdbId: 231})
    MERGE (u)-[r:RATED]->(m)
    SET r.rating = "7"
    RETURN m

    How the data is accessed

    Get movies and user ratings:

     MATCH (:User {id: "d6b31131-f203-4d5e-b1ff-d13ebc06934d"})-[rated:RATED]->(movie:Movie)
    RETURN DISTINCT movie, rated.rating as my_rating

    Get rated movies for user

 const getRatedByUser = function (session, userId) {
  return session
    .query(
      'MATCH (:User {id: $userId})-[rated:RATED]->(movie:Movie) \
       RETURN DISTINCT movie, rated.rating as my_rating',
      {userId},
    )
    .then((result) =>
      result._results.map((r) => new Movie(r.get('movie'), r.get('my_rating'))),
    );
};

    Data types:

    • The data is stored in various keys and various relationships.
      • There are 5 types of data
        • User
        • Director
        • Actor
        • Genre
        • Movie

    Each type has its own properties

• Actor: id, bio, born, bornIn, imdbId, name, poster, tmdbId, url
• Genre: id, name
• Director: id, born, bornIn, imdbId, name, tmdbId, url
• User: id, username, password, api_key
• Movie: id, url, languages, countries, budget, duration, imdbId, imdbRating, imdbVotes, movieId, plot, poster, poster_image, released, revenue, runtime, tagline, tmdbId, year

    And there are 4 types of relationship:

    • User-RATED->Movie
    • Director-DIRECTED->Movie
    • Actor-ACTED_IN_MOVIE->Movie
    • Movie-IN_GENRE->Genre

    References

    - + \ No newline at end of file diff --git a/howtos/redisgraph/using-dotnet/index.html b/howtos/redisgraph/using-dotnet/index.html index 0a86b55f68..bdc73aa1c4 100644 --- a/howtos/redisgraph/using-dotnet/index.html +++ b/howtos/redisgraph/using-dotnet/index.html @@ -4,7 +4,7 @@ How to query Graph data in Redis using .NET | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    How to query Graph data in Redis using .NET

    End-of-Life Notice

    Redis is phasing out RedisGraph. This blog post explains the motivation behind this decision and the implications for existing Redis customers and community members.

    End of support is scheduled for January 31, 2025.

    Beginning with Redis Stack 7.2.x-y, Redis Stack will no longer include graph capabilities (RedisGraph).

RedisGraph enables you to store and query graph data in Redis using the Cypher Query Language (https://opencypher.org/). In this article, we will discuss the usage of RedisGraph with .NET.

    NRedisGraph

    We'll use the NRedisGraph package in this tutorial. To install the package in your project, use dotnet add package NRedisGraph.

    Running RedisGraph

    The easiest way to get up and running with RedisGraph locally is to use the RedisGraph docker image:

    docker run -p 6379:6379 redis/redis-stack-server:latest

    The above command will start an instance of Redis locally with the RedisGraph module loaded, and you will be able to connect to it on localhost:6379

    Connecting

NRedisGraph makes use of the StackExchange.Redis project, which is installed along with NRedisGraph. To create the RedisGraph object, you'll first create a ConnectionMultiplexer, pull a reference to an IDatabase object from it, and then initialize the RedisGraph with that IDatabase object:

    var muxer = ConnectionMultiplexer.Connect("localhost");
    var db = muxer.GetDatabase();
    var graph = new RedisGraph(db);

    Sending a Query

    Querying in RedisGraph applies to a wide array of operations, but fundamentally when executing queries with NRedisGraph, all you need to do is execute graph.Query or graph.QueryAsync passing in the name of the graph you want to query and the query you want to run. For example, we'll be using the graph pets for the remainder of this tutorial, pets is the name of the key the graph will be stored at. Hence any call to graph.Query or graph.QueryAsync will first pass in pets to indicate the graph to work with.

    Creating a Node

    To create a node in RedisGraph, you'll use the Create Operation. Let's start by making 2 Humans, Alice and Bob:

    var createBobResult = await graph.QueryAsync("pets", "CREATE(:human{name:'Bob',age:32})");
    await graph.QueryAsync("pets", "CREATE(:human{name:'Alice',age:30})");

    Running a Query against RedisGraph will result in a ResultSet. This result will contain some metadata about the result of the query in the Statistics section and any results generated by the query. In the above case, the only thing returned is the statistics for the query, which you can print out directly from the results object:

    Console.WriteLine($"Nodes Created:{createBobResult.Statistics.NodesCreated}");
    Console.WriteLine($"Properties Set:{createBobResult.Statistics.PropertiesSet}");
    Console.WriteLine($"Labels Created:{createBobResult.Statistics.LabelsAdded}");
    Console.WriteLine($"Operation took:{createBobResult.Statistics.QueryInternalExecutionTime}");

    You can create nodes with other labels by simply executing another CREATE statement. For example, if we wanted to create a 'pet' named 'Honey' who is a 5-year-old greyhound, we would run:

    await graph.QueryAsync("pets", "CREATE(:pet{name:'Honey',age:5,species:'canine',breed:'Greyhound'})");

    Creating Relationships

    Like creating nodes, you can also create relationships in RedisGraph using the Query/QueryAsync commands to create relationships between nodes in RedisGraph. For example, to establish the owner relationship between Bob and the Greyhound Honey, you would use the following:

    await graph.QueryAsync("pets",
    "MATCH(a:human),(p:pet) WHERE(a.name='Bob' and p.name='Honey') CREATE (a)-[:OWNS]->(p)");

You could establish other relationships between nodes as well. Say, for example, that both Bob and Alice walk Honey; you could add the connections:

    await graph.QueryAsync("pets",
    "MATCH(a:human),(p:pet) WHERE(a.name='Alice' and p.name='Honey') CREATE (a)-[:WALKS]->(p)");
    await graph.QueryAsync("pets",
    "MATCH(a:human),(p:pet) WHERE(a.name='Bob' and p.name='Honey') CREATE (a)-[:WALKS]->(p)");

    Querying Relationships

    Now that we've created a few Nodes and Relationships between nodes, we can query things in the Graph, again using Query and QueryAsync. So, for example, if we wanted to find all of Honey's owners, we would issue the following query:

    var matches = await graph.QueryAsync("pets", "MATCH(a:human),(p:pet) where (a)-[:OWNS]->(p) and p.name='Honey' return a");

    We can then iterate over the resultant matches, which is the same ResultSet class we were using before, but it will have actual results we can access this time.

    foreach (var match in matches)
    {
    Console.WriteLine(((Node) match.Values.First()).PropertyMap["name"].Value);
    }

We can also find all the walkers of Honey by finding all the humans who have a WALKS relationship with Honey:

    matches = await graph.QueryAsync("pets", "MATCH(a:human),(p:pet) where (a)-[:WALKS]->(p) and p.name='Honey' return a");

    Then if we wanted to find all of Bob's dogs, we would query the graph and find all the canines who have an OWNS relationship with a human named Bob:

    matches = await graph.QueryAsync("pets", "MATCH(a:human),(p:pet) where (a)-[:OWNS]->(p) and p.species='canine' and a.name='Bob' return p");
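Putting the pieces above together, here is a minimal end-to-end sketch that assumes a local Redis Stack instance on localhost:6379; it simply combines the snippets from this article into one runnable program:

using System;
using System.Linq;
using System.Threading.Tasks;
using NRedisGraph;
using StackExchange.Redis;

public class Program
{
    public static async Task Main()
    {
        // Connect to Redis and initialize the RedisGraph client.
        var muxer = ConnectionMultiplexer.Connect("localhost");
        var db = muxer.GetDatabase();
        var graph = new RedisGraph(db);

        // Create a human, a pet, and an OWNS relationship between them.
        await graph.QueryAsync("pets", "CREATE(:human{name:'Bob',age:32})");
        await graph.QueryAsync("pets", "CREATE(:pet{name:'Honey',age:5,species:'canine',breed:'Greyhound'})");
        await graph.QueryAsync("pets",
            "MATCH(a:human),(p:pet) WHERE(a.name='Bob' and p.name='Honey') CREATE (a)-[:OWNS]->(p)");

        // Find Honey's owners and print their names.
        var matches = await graph.QueryAsync("pets",
            "MATCH(a:human),(p:pet) where (a)-[:OWNS]->(p) and p.name='Honey' return a");
        foreach (var match in matches)
        {
            Console.WriteLine(((Node)match.Values.First()).PropertyMap["name"].Value);
        }
    }
}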

    Resources

    • Code for this demo is available in GitHub
    • To learn more about RedisGraph, check out the docs site
    • To learn more about The Cypher Query Language, check out opencypher.org
    - + \ No newline at end of file diff --git a/howtos/redisgraph/using-go/index.html b/howtos/redisgraph/using-go/index.html index fb42f5a526..9248842e4a 100644 --- a/howtos/redisgraph/using-go/index.html +++ b/howtos/redisgraph/using-go/index.html @@ -4,7 +4,7 @@ How to query Graph data in Redis using Go | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    How to query Graph data in Redis using Go

    End-of-Life Notice

    Redis is phasing out RedisGraph. This blog post explains the motivation behind this decision and the implications for existing Redis customers and community members.

    End of support is scheduled for January 31, 2025.

    Beginning with Redis Stack 7.2.x-y, Redis Stack will no longer include graph capabilities (RedisGraph).


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

RedisGraph is the fastest graph database that processes complex graph operations in real time, 10x – 600x faster than any other graph database. It can show how your data is connected through multiple visualization integrations, including RedisInsight, Linkurious, and Graphileon. Query graphs using the industry-standard Cypher query language and easily use graph capabilities from application code.

    RedisGraph Go Client

redisgraph-go is a Golang client for the RedisGraph module. It relies on redigo for Redis connection management and provides support for RedisGraph's QUERY, EXPLAIN, and DELETE commands.

    Follow the steps below to get started with RedisGraph with Go:

    Step 1. Run Redis Stack Docker container

 docker run -p 6379:6379 --name redis-stack redis/redis-stack

    Step 2. Verify if RedisGraph module is loaded

     info modules
    # Modules
    module:name=graph,ver=20405,api=1,filters=0,usedby=[],using=[],options=[]

Step 3. Install the RedisGraph Go client

     go get github.com/redislabs/redisgraph-go
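If you would like a standalone program in addition to the repository's main.go (run in Step 6 below), here is a minimal sketch following the example in the redisgraph-go README; treat it as a starting point and check field names against the client version you install:

package main

import (
	"github.com/gomodule/redigo/redis"
	rg "github.com/redislabs/redisgraph-go"
)

func main() {
	// Connect to the Redis Stack container started earlier.
	conn, err := redis.Dial("tcp", "127.0.0.1:6379")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	graph := rg.GraphNew("social", conn)

	// Build two nodes and a relationship between them.
	john := rg.Node{
		Label:      "person",
		Properties: map[string]interface{}{"name": "John Doe", "age": 33},
	}
	graph.AddNode(&john)

	japan := rg.Node{
		Label:      "country",
		Properties: map[string]interface{}{"name": "Japan"},
	}
	graph.AddNode(&japan)

	edge := rg.Edge{Source: &john, Relation: "visited", Destination: &japan}
	graph.AddEdge(&edge)

	// Flush the pending nodes and edge to Redis.
	if _, err := graph.Commit(); err != nil {
		panic(err)
	}

	// Query the graph and pretty-print the result table.
	result, err := graph.Query("MATCH (p:person)-[:visited]->(c:country) RETURN p.name, p.age, c.name")
	if err != nil {
		panic(err)
	}
	result.PrettyPrint()
}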

    Step 4. Clone the repository

     git clone https://github.com/RedisGraph/redisgraph-go

    Step 5. Running the Test suite

     go test
    found packages redisgraph (client_test.go) and main (main.go) in /Users/ajeetraina/projects/redisgraph-go

    Step 6. Running the Go Program

     go run main.go
    +----------+-------+--------+
    | p.name | p.age | c.name |
    +----------+-------+--------+
    | John Doe | 33 | Japan |
    +----------+-------+--------+

    Cached execution 0.000000
    Query internal execution time 3.031700
    Visited countries by person:

    Name: John Doe

    Age: 33
    Pathes of persons vi

    Step 7. Monitor the Graph query

     redis-cli
    127.0.0.1:6379> monitor
    OK
    1633495122.588292 [0 172.17.0.1:58538] "GRAPH.DELETE" "social"
    1633495122.589641 [0 172.17.0.1:58538] "GRAPH.QUERY" "social" "CREATE (UPoQSvSnBD:person{gender:\"male\",status:\"single\",name:\"John Doe\",age:33}),(ZNxbsnHGoO:country{name:\"Japan\"}),(UPoQSvSnBD)-[:visited]->(ZNxbsnHGoO)" "--compact"
    1633495122.591407 [0 172.17.0.1:58538] "GRAPH.QUERY" "social" "MATCH (p:person)-[v:visited]->(c:country)\n RETURN p.name, p.age, c.name" "--compact"
    1633495122.593040 [0 172.17.0.1:58538] "GRAPH.QUERY" "social" "MATCH p = (:person)-[:visited]->(:country) RETURN p" "--compact"
    1633495122.594405 [0 172.17.0.1:58538] "GRAPH.QUERY" "social" "CALL db.labels()" "--compact"
    1633495122.595552 [0 172.17.0.1:58538] "GRAPH.QUERY" "social" "CALL db.propertyKeys()" "--compact"
    1633495122.596942 [0 172.17.0.1:58538] "GRAPH.QUERY" "social" "CALL db.relationshipTypes()" "--compact"

    Step 8. Install RedisInsight

    Run the RedisInsight container. The easiest way is to run the following command:

     docker run -d -v redisinsight:/db -p 8001:8001 redislabs/redisinsight:latest

    Step 9. Accessing RedisInsight

    Next, point your browser to http://localhost:8001.

    Step 10. Run the Graph Query

Run the following query to return every node (you can add a LIMIT clause to cap the number of records returned):

    GRAPH.QUERY "social" "MATCH (n) RETURN n"

    My Image

    References

    Redis Launchpad
    - + \ No newline at end of file diff --git a/howtos/redisgraph/using-javascript/index.html b/howtos/redisgraph/using-javascript/index.html index d75dbf53a3..63f4fe4d55 100644 --- a/howtos/redisgraph/using-javascript/index.html +++ b/howtos/redisgraph/using-javascript/index.html @@ -4,7 +4,7 @@ How to query Graph data in Redis using JavaScript | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    How to query Graph data in Redis using JavaScript

    End-of-Life Notice

    Redis is phasing out RedisGraph. This blog post explains the motivation behind this decision and the implications for existing Redis customers and community members.

    End of support is scheduled for January 31, 2025.

    Beginning with Redis Stack 7.2.x-y, Redis Stack will no longer include graph capabilities (RedisGraph).


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

RedisGraph is the fastest graph database that processes complex graph operations in real time, 10x – 600x faster than any other graph database. It can show how your data is connected through multiple visualization integrations, including RedisInsight, Linkurious, and Graphileon. Query graphs using the industry-standard Cypher query language and easily use graph capabilities from application code.

    RedisGraph JavaScript Client

Follow the steps below to get started with RedisGraph with JavaScript:

    Step 1. Run Redis Stack Docker container

 docker run -p 6379:6379 --name redis-stack redis/redis-stack

    Step 2. Verify if RedisGraph module is loaded

     info modules
    # Modules
    module:name=graph,ver=20405,api=1,filters=0,usedby=[],using=[],options=[]

    Step 3. Clone the repository

     git clone https://github.com/RedisGraph/redisgraph.js

    Step 4. Install the packages locally

     npm install redisgraph.js

Step 5. Write the JavaScript code

const RedisGraph = require('redisgraph.js').Graph;

let graph = new RedisGraph('social');

(async () => {
  await graph.query("CREATE (:person{name:'roi',age:32})");
  await graph.query("CREATE (:person{name:'amit',age:30})");
  await graph.query(
    "MATCH (a:person), (b:person) WHERE (a.name = 'roi' AND b.name='amit') CREATE (a)-[:knows]->(b)",
  );

  // Match query.
  let res = await graph.query(
    'MATCH (a:person)-[:knows]->(:person) RETURN a.name',
  );
  while (res.hasNext()) {
    let record = res.next();
    console.log(record.get('a.name'));
  }
  console.log(res.getStatistics().queryExecutionTime());

  // Match with parameters.
  let param = { age: 30 };
  res = await graph.query('MATCH (a {age: $age}) return a.name', param);
  while (res.hasNext()) {
    let record = res.next();
    console.log(record.get('a.name'));
  }

  // Named paths matching.
  res = await graph.query('MATCH p = (a:person)-[:knows]->(:person) RETURN p');
  while (res.hasNext()) {
    let record = res.next();
    // See path.js for more path API.
    console.log(record.get('p').nodeCount);
  }
  graph.deleteGraph();
  graph.close();
})();

    Save the above file as "app.js".

    Step 6. Execute the Script

     node app.js
     roi
    0.1789
    amit
    2

    Step 7. Monitor the Graph query

     1632898652.415702 [0 172.17.0.1:64144] "info"
    1632898652.418225 [0 172.17.0.1:64144] "graph.query" "social" "CREATE (:person{name:'roi',age:32})" "--compact"
    1632898652.420399 [0 172.17.0.1:64144] "graph.query" "social" "CREATE (:person{name:'amit',age:30})" "--compact"
    1632898652.421857 [0 172.17.0.1:64144] "graph.query" "social" "MATCH (a:person), (b:person) WHERE (a.name = 'roi' AND b.name='amit') CREATE (a)-[:knows]->(b)" "--compact"
    1632898652.424911 [0 172.17.0.1:64144] "graph.query" "social" "MATCH (a:person)-[:knows]->(:person) RETURN a.name" "--compact"
    1632898652.429658 [0 172.17.0.1:64144] "graph.query" "social" "CYPHER age=30 MATCH (a {age: $age}) return a.name" "--compact"
    1632898652.431221 [0 172.17.0.1:64144] "graph.query" "social" "MATCH p = (a:person)-[:knows]->(:person) RETURN p" "--compact"
    1632898652.433146 [0 172.17.0.1:64144] "graph.query" "social" "CALL db.labels()" "--compact"
    1632898652.434781 [0 172.17.0.1:64144] "graph.query" "social" "CALL db.propertyKeys()" "--compact"
    1632898652.436574 [0 172.17.0.1:64144] "graph.query" "social" "CALL db.relationshipTypes()" "--compact"
    1632898652.438559 [0 172.17.0.1:64144] "graph.delete" "social"

    Step 8. Install RedisInsight

    Run the RedisInsight container. The easiest way is to run the following command:

     docker run -d -v redisinsight:/db -p 8001:8001 redislabs/redisinsight:latest

    Step 9. Accessing RedisInsight

    Next, point your browser to http://localhost:8001.

    Step 10. Run the Graph Query

    You can display the number of records returned by a query:

    My Image

    References

    Redis Launchpad
    - + \ No newline at end of file diff --git a/howtos/redisgraph/using-python/index.html b/howtos/redisgraph/using-python/index.html index caf0a118f7..5b14039891 100644 --- a/howtos/redisgraph/using-python/index.html +++ b/howtos/redisgraph/using-python/index.html @@ -4,7 +4,7 @@ How to query Graph data in Redis using Python | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    How to query Graph data in Redis using Python

    End-of-Life Notice

    Redis is phasing out RedisGraph. This blog post explains the motivation behind this decision and the implications for existing Redis customers and community members.

    End of support is scheduled for January 31, 2025.

    Beginning with Redis Stack 7.2.x-y, Redis Stack will no longer include graph capabilities (RedisGraph).


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

RedisGraph is the fastest graph database that processes complex graph operations in real time, 10x – 600x faster than any other graph database. It can show how your data is connected through multiple visualization integrations, including RedisInsight, Linkurious, and Graphileon. Query graphs using the industry-standard Cypher query language and easily use graph capabilities from application code.

    My Image

    RedisGraph Python Client

redisgraph-py is a package that allows querying graph data in a Redis database that is extended with the RedisGraph module. The package extends redis-py's interface with RedisGraph's API.

    Follow the steps below to get started with RedisGraph with Python:

    Step 1. Run Redis Stack Docker container

 docker run -p 6379:6379 --name redis-stack redis/redis-stack

    Step 2. Verify if RedisGraph module is loaded

     info modules
    # Modules
    module:name=graph,ver=20405,api=1,filters=0,usedby=[],using=[],options=[]

Step 3. Install the RedisGraph Python client

     pip install redisgraph

Step 4. Write the Python code

 import redis
from redisgraph import Node, Edge, Graph, Path

r = redis.Redis(host='localhost', port=6379)

redis_graph = Graph('social', r)

john = Node(label='person', properties={'name': 'John Doe', 'age': 33, 'gender': 'male', 'status': 'single'})
redis_graph.add_node(john)

japan = Node(label='country', properties={'name': 'Japan'})
redis_graph.add_node(japan)

edge = Edge(john, 'visited', japan, properties={'purpose': 'pleasure'})
redis_graph.add_edge(edge)

redis_graph.commit()

query = """MATCH (p:person)-[v:visited {purpose:"pleasure"}]->(c:country)
           RETURN p.name, p.age, v.purpose, c.name"""

result = redis_graph.query(query)

# Print resultset
result.pretty_print()

# Use parameters
params = {'purpose': "pleasure"}
query = """MATCH (p:person)-[v:visited {purpose:$purpose}]->(c:country)
           RETURN p.name, p.age, v.purpose, c.name"""

result = redis_graph.query(query, params)

# Print resultset
result.pretty_print()

# Use query timeout to raise an exception if the query takes over 10 milliseconds
result = redis_graph.query(query, params, timeout=10)

# Iterate through resultset
for record in result.result_set:
    person_name = record[0]
    person_age = record[1]
    visit_purpose = record[2]
    country_name = record[3]

query = """MATCH p = (:person)-[:visited {purpose:"pleasure"}]->(:country) RETURN p"""

result = redis_graph.query(query)

# Iterate through resultset
for record in result.result_set:
    path = record[0]
    print(path)

# All done, remove graph.
redis_graph.delete()
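The monitor output in Step 6 below shows both GRAPH.QUERY and GRAPH.RO_QUERY commands. If you want to issue a read-only query explicitly, recent redisgraph-py versions expose a read_only flag on query(); this is an assumption worth verifying against the client's README:

 # Hedged sketch: run the query as GRAPH.RO_QUERY instead of GRAPH.QUERY.
result = redis_graph.query(query, params, read_only=True)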

    Step 5. Execute the Python Script

      python3 test.py
    +-----------+----------+--------------+-----------+
    | b'p.name' | b'p.age' | b'v.purpose' | b'c.name' |
    +-----------+----------+--------------+-----------+
    | John Doe | 33 | pleasure | Japan |
    +-----------+----------+--------------+-----------+

    Cached execution 0.0
    internal execution time 3.3023
    +-----------+----------+--------------+-----------+
    | b'p.name' | b'p.age' | b'v.purpose' | b'c.name' |
    +-----------+----------+--------------+-----------+
    | John Doe | 33 | pleasure | Japan |
    +-----------+----------+--------------+-----------+

    Cached execution 0.0
    internal execution time 0.2475
    <(0)-[0]->(1)>

    Step 6. Monitor the Graph query

     127.0.0.1:6379> monitor
    OK
    1632661901.024018 [0 172.17.0.1:61908] "GRAPH.QUERY" "social" "CREATE (youkjweasb:person{age:33,gender:\"male\",name:\"John Doe\",status:\"single\"}),(jilbktlmgw:country{name:\"Japan\"}),(youkjweasb:person{age:33,gender:\"male\",name:\"John Doe\",status:\"single\"})-[:visited{purpose:\"pleasure\"}]->(jilbktlmgw:country{name:\"Japan\"})" "--compact"
    1632661901.025810 [0 172.17.0.1:61908] "GRAPH.QUERY" "social" "MATCH (p:person)-[v:visited {purpose:\"pleasure\"}]->(c:country)\n\t\t RETURN p.name, p.age, v.purpose, c.name" "--compact"
    1632661901.027485 [0 172.17.0.1:61908] "GRAPH.QUERY" "social" "CYPHER purpose=\"pleasure\" MATCH (p:person)-[v:visited {purpose:$purpose}]->(c:country)\n\t\t RETURN p.name, p.age, v.purpose, c.name" "--compact"
    1632661901.029539 [0 172.17.0.1:61908] "GRAPH.QUERY" "social" "CYPHER purpose=\"pleasure\" MATCH (p:person)-[v:visited {purpose:$purpose}]->(c:country)\n\t\t RETURN p.name, p.age, v.purpose, c.name" "--compact" "timeout" "10"
    1632661901.030965 [0 172.17.0.1:61908] "GRAPH.QUERY" "social" "MATCH p = (:person)-[:visited {purpose:\"pleasure\"}]->(:country) RETURN p" "--compact"
    1632661901.032250 [0 172.17.0.1:61908] "GRAPH.RO_QUERY" "social" "CALL db.labels()" "--compact"
    1632661901.033323 [0 172.17.0.1:61908] "GRAPH.RO_QUERY" "social" "CALL db.propertyKeys()" "--compact"
    1632661901.034589 [0 172.17.0.1:61908] "GRAPH.RO_QUERY" "social" "CALL db.relationshipTypes()" "--compact"
    1632661901.035625 [0 172.17.0.1:61908] "GRAPH.DELETE" "social"

Comment out the last line (redis_graph.delete()) so that the graph persists, then try querying the graph data in RedisInsight.

    Step 7. Install RedisInsight

    Run the RedisInsight container. The easiest way is to run the following command:

     docker run -d -v redisinsight:/db -p 8001:8001 redislabs/redisinsight:latest

    Step 8. Accessing RedisInsight

    Next, point your browser to http://localhost:8001.

    Step 9. Run the Graph Query

    You can use the limit clause to limit the number of records returned by a query:

    MATCH (n) RETURN n LIMIT 1

    My Image

    Step 10. Run the Graph Query with no LIMIT

    MATCH (n) RETURN n

    My Image

    References

    Redis Launchpad
    - + \ No newline at end of file diff --git a/howtos/redisgraph/using-redisinsight/index.html b/howtos/redisgraph/using-redisinsight/index.html index f1512b6984..3597d876bb 100644 --- a/howtos/redisgraph/using-redisinsight/index.html +++ b/howtos/redisgraph/using-redisinsight/index.html @@ -4,7 +4,7 @@ How to visualize Graph data using RedisInsight | The Home of Redis Developers - + @@ -13,7 +13,7 @@

    How to visualize Graph data using RedisInsight

    End-of-Life Notice

    Redis is phasing out RedisGraph. This blog post explains the motivation behind this decision and the implications for existing Redis customers and community members.

    End of support is scheduled for January 31, 2025.

    Beginning with Redis Stack 7.2.x-y, Redis Stack will no longer include graph capabilities (RedisGraph).


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

If you’re a Redis user who prefers to use a graphical user interface (GUI) for graph queries, then RedisInsight is the right tool for you. It’s a 100% free desktop Redis GUI that provides easy-to-use browser tools to query, visualize, and interactively manipulate graphs. You can add new graphs, run queries, and explore the results in the GUI.

    RedisInsight supports RedisGraph and allows you to:

    • Build and execute queries
    • Navigate your graphs
    • Browse, analyze, and export results
    • Keyboard shortcuts to zoom
• A button to reset the view and center the entire graph
• Zoom capability via the mouse wheel (double-click to zoom in, double right-click to zoom out)
• Ability to copy commands with a button click
• Ability to persist node display choices between queries

    As a benefit, you get faster turnarounds when building your application using Redis and RedisGraph.

    Follow the below steps to see how your data is connected via the RedisInsight Browser tool.

    Step 1. Create Redis database

Follow this link to create a Redis database using Redis Enterprise Cloud with the RedisGraph module enabled.

    alt_text

    Step 2: Download RedisInsight

    To install RedisInsight on your local system, you need to first download the software from the Redis website.

    Click this link to access a form that allows you to select the operating system of your choice.

    My Image

    Run the installer. After the web server starts, open http://YOUR_HOST_IP:8001 and add a Redis database connection.

    Select "Connect to a Redis database" My Image

    Enter the requested details, including Name, Host (endpoint), Port, and Password. Then click “ADD REDIS DATABASE”.

    Step 3: Click “RedisGraph” and then “Add Graph”

    Select RedisGraph from the menu.

    alt_text

    Step 4. Create a new Graph called “Friends”

    alt_text

Step 5. Add individuals (nodes) to the graph

Let us add individuals to the graph. CREATE is used to introduce new nodes and relationships. Run the below Cypher query in the RedisInsight GUI to add a label called Person and a property called “name”.

    CREATE (:Person{name:"Tom" }),  (:Person{name:"Alex" }), (:Person{name:"Susan" }), (:Person{name:"Bill" }), (:Person{name:"Jane" })

    alt_text

As the output shows, one label was added: the Person label, which is shared by every node and hence created only once. Five nodes were created in total, and five “name” properties were set.

    Step 6: View all the individuals (nodes)

MATCH describes the relationship between queried entities, using ASCII art to represent the pattern(s) to match against. Nodes are represented by parentheses (), and relationships are represented by brackets [].

As shown below, we have added a lowercase “p” in front of our label; it is a variable we can reference later. The query returns all the nodes with the label “Person”.

    MATCH (p:Person) RETURN p

    alt_text

    You can select "Graph View" on the right menu to display the graphical representation as shown below:

    alt_text

    Step 7. Viewing just one individual(node)

    MATCH (p:Person {name:"Tom"}) RETURN p

    alt_text

    Step 8: Visualize the relationship between the individuals

Run the below query to build a relationship between two nodes and see how the relationship flows from one node (“Tom”) to another (“Alex”).

    MATCH (p1:Person {name: "Tom" }), (p2:Person {name: "Alex" }) CREATE (p1)-[:Knows]->(p2)

    The symbol “>” (greater than) shows which way the relationship flows.

    alt_text

You can view the relationship in the form of a graph, as shown below:

    alt_text

Step 9. Create and visualize multiple relationships

Run the below query to create and visualize relationships between multiple individuals:

    MATCH (p1:Person {name: "Tom" }), (p2:Person {name: "Susan" }), (p3:Person {name: "Bill" }) CREATE (p1)-[:Knows]->(p2), (p1)-[:Knows]->(p3)

    alt_text

    Step 10. Create and visualize the relationship between two individuals (Susan and Bill)

Let us look at how to generate a graph showcasing the relationship between two individuals, Susan and Bill:

    MATCH (p1:Person {name: "Susan"}), (p2:Person {name: "Bill"}) CREATE (p1)-[:Knows]->(p2)

    alt_text

Step 11. Create and visualize the relationship between two individuals (Bill and Jane)

    MATCH (p1:Person {name: "Bill"}), (p2:Person {name: "Jane"}) CREATE (p1)-[:Knows]->(p2)

    alt_text

    alt_text

Step 12. Building a social network

This can be modeled as a “friend of friends” kind of relationship. Say Tom wants to network with Jane: he has two contacts who know Jane, one being Susan and the other Bill.

    alt_text

    MATCH p = (p1:Person {name: "Tom" })-[:Knows*1..3]-(p2:Person {name: "Jane"}) RETURN p

In this query, we assign a variable “p” to a node graph path. We search for “Tom” as p1 and “Jane” as p2, and we ask for Knows links with one to three degrees of separation.

    alt_text

    Step 13. Cleaning up the Graph

    alt_text
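This step is shown only as a screenshot on the deployed page. Two common ways to clean up, assuming your graph is named Friends, are deleting every node from within a query (in RedisGraph, deleting a node also removes its incident relationships) or dropping the graph key entirely:

 MATCH (n) DELETE n

 GRAPH.DELETE Friends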

    Importing the Bulk Graph data

Let us try to insert bulk data using Python and then explore it in the form of nodes and relationships.

Step 14. Clone the repository

    $ git clone https://github.com/redis-developer/redis-datasets
    cd redis-datasets/redisgraph/datasets/iceandfire

    Step 15. Execute the script

    $ python3 bulk_insert.py GOT_DEMO -n data/character.csv -n data/house.csv -n data/book.csv -n data/writer.csv -r data/wrote.csv -r data/belongs.csv -h 192.168.1.9 -p 6379



    2124 nodes created with label 'b'character''
    438 nodes created with label 'b'house''
    12 nodes created with label 'b'book''
    3 nodes created with label 'b'writer''
    14 relations created for type 'b'wrote''
    2208 relations created for type 'b'belongs''
    Construction of graph 'GOT_DEMO' complete: 2577 nodes created, 2222 relations created in 0.169954 seconds


    Step 16. Run the cypher query

    GRAPH.QUERY GOT_DEMO "MATCH (w:writer)-[wrote]->(b:book) return w,b"

    alt_text

    Additional Resources

    Redis Launchpad
    - + \ No newline at end of file diff --git a/howtos/redisgraph/using-ruby/index.html b/howtos/redisgraph/using-ruby/index.html index 52a5768201..18e9e57d5f 100644 --- a/howtos/redisgraph/using-ruby/index.html +++ b/howtos/redisgraph/using-ruby/index.html @@ -4,7 +4,7 @@ How to query Graph data in Redis using Ruby | The Home of Redis Developers - + @@ -13,7 +13,7 @@

    How to query Graph data in Redis using Ruby

    End-of-Life Notice

    Redis is phasing out RedisGraph. This blog post explains the motivation behind this decision and the implications for existing Redis customers and community members.

    End of support is scheduled for January 31, 2025.

    Beginning with Redis Stack 7.2.x-y, Redis Stack will no longer include graph capabilities (RedisGraph).


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

RedisGraph is the first queryable Property Graph database to use sparse matrices to represent the adjacency matrix in graphs and linear algebra to query the graph. A few of the notable features of RedisGraph include:

    • Based on the Property Graph Model
    • Nodes (vertices) and Relationships (edges) that may have attributes
    • Nodes that can be labeled
    • Relationships have a relationship type
    • Graphs represented as sparse adjacency matrices
    • Cypher as query language
    • Cypher queries translated into linear algebra expressions

RedisGraph is based on a unique approach and architecture that translates Cypher queries to matrix operations executed over a GraphBLAS engine. This new design allows use cases like social graph operations, fraud detection, and real-time recommendation to be executed 10x – 600x faster than any other graph database.

    RedisGraph Ruby Client

    redisgraph-rb is a Ruby gem client for the RedisGraph module. It relies on redis-rb for Redis connection management and provides support for graph QUERY, EXPLAIN, and DELETE commands.

    Follow the steps below to get started with RedisGraph with Ruby:

    Step 1. Run Redis Stack Docker container

 docker run -d -p 6379:6379 --name redis-stack redis/redis-stack

    Step 2. Verify if RedisGraph module is loaded

     info modules
    # Modules
    module:name=graph,ver=20405,api=1,filters=0,usedby=[],using=[],options=[]

Step 3. Install the RedisGraph Ruby gem

     gem install redisgraph
    Fetching redisgraph-2.0.3.gem
    Successfully installed redisgraph-2.0.3
    1 gem installed

    Step 4. Install the prerequisites

To ensure all prerequisites are installed, run the following:

     bundle install

Step 5. Write the Ruby code

Copy the sample code below and save it in a file called "test.rb":

 require 'redisgraph'

graphname = "sample"

r = RedisGraph.new(graphname)

cmd = "CREATE (:person {name: 'Jim', age: 29})-[:works]->(:employer {name: 'Dunder Mifflin'})"
response = r.query(cmd)
response.stats

cmd = "MATCH ()-[:works]->(e:employer) RETURN e"

response = r.query(cmd)

response.print_resultset

    Step 6. Execute the Ruby code

      ruby test.rb

    Step 7. Monitor the Graph query

     redis-cli
    127.0.0.1:6379> monitor
    OK
    1632716792.038955 [0 172.17.0.1:57804] "info"
    1632716792.041201 [0 172.17.0.1:57804] "GRAPH.QUERY" "sample" "CREATE (:person {name: 'Jim', age: 29})-[:works]->(:employer {name: 'Dunder Mifflin'})" "--compact"
    1632716792.042751 [0 172.17.0.1:57804] "GRAPH.QUERY" "sample" "MATCH ()-[:works]->(e:employer) RETURN e" "--compact"
    1632716792.044241 [0 172.17.0.1:57804] "GRAPH.QUERY" "sample" "CALL db.propertyKeys()"
    1632716812.060458 [0 172.17.0.1:57962] "COMMAND"
    1632716813.148710 [0 172.17.0.1:57962] "GRAPH.QUERY" "sample" "CREATE (:person {name: 'Jim', age: 29})-[:works]->(:employer {name: 'Dunder Mifflin'})" "--compact"

    References

    Redis Launchpad

    How to query Graph data in Redis using Rust

    End-of-Life Notice

    Redis is phasing out RedisGraph. This blog post explains the motivation behind this decision and the implications for existing Redis customers and community members.

    End of support is scheduled for January 31, 2025.

    Beginning with Redis Stack 7.2.x-y, Redis Stack will no longer include graph capabilities (RedisGraph).


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

RedisGraph is the first queryable Property Graph database to use sparse matrices to represent the adjacency matrix in graphs and linear algebra to query the graph. RedisGraph is based on a unique approach and architecture that translates Cypher queries to matrix operations executed over a GraphBLAS engine. This new design allows use cases like social graph operations, fraud detection, and real-time recommendation to be executed 10x – 600x faster than any other graph database. You can also visualize how your data is connected through multiple integrations, including RedisInsight, Linkurious, and Graphileon.

RedisGraph is a graph database developed from scratch on top of Redis, using the new Redis Modules API to extend Redis with new commands and capabilities. Its main features include simple, fast indexing and querying of data stored in RAM using memory-efficient custom data structures. RedisGraph is a directed graph where both nodes and relationships are typed - nodes with labels and edges with types. Nodes and edges can, and often do, contain properties, like columns in a SQL database or keys in a document store. The newer RedisGraph 2.0 benchmark reveals significant improvements on parallel workloads (multiple clients), with latency improvements of up to 6x and throughput improvements of up to 5x when performing graph traversals.

    Below are the primary use cases of RedisGraph:

• Recommendation: It allows you to rapidly find connections between your customers and the experiences they want by examining the relationships between them.
• Graph-aided search: It allows you to search for single or multiple words or phrases and execute full-text and linguistic queries in real time over your graph structure.
• Identity and access management: It allows you to define complex resource access permissions as a graph and enable rapid real-time verification of these permissions with a single query.

    RedisGraph Rust Client

The Rust programming language is blazingly fast and memory-efficient: with no runtime or garbage collector, it can power performance-critical services, run on embedded devices, and easily integrate with other languages. It is an open-source project developed originally at Mozilla Research. The Rust standard library is the foundation of portable Rust software, a set of minimal and battle-tested shared abstractions for the broader Rust ecosystem.

redisgraph-rs is an idiomatic Rust client for RedisGraph, the graph database by Redis. This crate parses responses from RedisGraph and converts them into ordinary Rust values. It exposes a very flexible API that allows you to retrieve a single value, a single record, or multiple records using only one function: Graph::query.

    Follow the steps below to get started with RedisGraph with Rust:

    Step 1. Run Redis Stack Docker container

 docker run -d -p 6379:6379 --name redis-stack redis/redis-stack

    Step 2. Verify if RedisGraph module is loaded

     info modules
    # Modules
    module:name=graph,ver=20405,api=1,filters=0,usedby=[],using=[],options=[]

    Step 3. Install Rust

     brew install rust

    Step 4. Clone the repository

      git clone https://github.com/malte-v/redisgraph-rs

Step 5. Write a Rust program

Copy the content below and save it as "main.rs" under the src directory.

 use redis::Client;
use redisgraph::{Graph, RedisGraphResult};

fn main() -> RedisGraphResult<()> {
    let client = Client::open("redis://127.0.0.1:6379")?;
    let connection = client.get_connection()?;

    let mut graph = Graph::open(connection, "MotoGP".to_string())?;

    // Create six nodes (three riders, three teams) and three relationships between them.
    graph.mutate("CREATE (:Rider {name: 'Valentino Rossi', birth_year: 1979})-[:rides]->(:Team {name: 'Yamaha'}), \
        (:Rider {name:'Dani Pedrosa', birth_year: 1985, height: 1.58})-[:rides]->(:Team {name: 'Honda'}), \
        (:Rider {name:'Andrea Dovizioso', birth_year: 1986, height: 1.67})-[:rides]->(:Team {name: 'Ducati'})")?;

    // Get the names and birth years of all riders in team Yamaha.
    let results: Vec<(String, u32)> = graph.query("MATCH (r:Rider)-[:rides]->(t:Team) WHERE t.name = 'Yamaha' RETURN r.name, r.birth_year")?;
    // Since we know just one rider in our graph rides for team Yamaha,
    // we can also write this and only get the first record:
    let (name, birth_year): (String, u32) = graph.query("MATCH (r:Rider)-[:rides]->(t:Team) WHERE t.name = 'Yamaha' RETURN r.name, r.birth_year")?;
    // Let's now get all the data about the riders we have.
    // Be aware that we only know the height of some riders, and therefore we use an `Option`:
    let results: Vec<(String, u32, Option<f32>)> = graph.query("MATCH (r:Rider) RETURN r.name, r.birth_year, r.height")?;

    // That was just a demo; we don't need this graph anymore. Let's delete it from the database:
    graph.delete()?;

    Ok(())
}

    Step 6. Run the current local package

     cargo run

    Step 7. Monitor the Graph query

     1633515550.109594 [0 172.17.0.1:55114] "GRAPH.QUERY" "MotoGP" "CREATE (dummy:__DUMMY_LABEL__)" "--compact"
    1633515550.111727 [0 172.17.0.1:55114] "GRAPH.QUERY" "MotoGP" "MATCH (dummy:__DUMMY_LABEL__) DELETE dummy" "--compact"
    1633515550.114948 [0 172.17.0.1:55114] "GRAPH.QUERY" "MotoGP" "CREATE (:Rider {name: 'Valentino Rossi', birth_year: 1979})-[:rides]->(:Team {name: 'Yamaha'}), (:Rider {name:'Dani Pedrosa', birth_year: 1985, height: 1.58})-[:rides]->(:Team {name: 'Honda'}), (:Rider {name:'Andrea Dovizioso', birth_year: 1986, height: 1.67})-[:rides]->(:Team {name: 'Ducati'})" "--compact"
    1633515550.118380 [0 172.17.0.1:55114] "GRAPH.QUERY" "MotoGP" "MATCH (r:Rider)-[:rides]->(t:Team) WHERE t.name = 'Yamaha' RETURN r.name, r.birth_year" "--compact"
    1633515550.120766 [0 172.17.0.1:55114] "GRAPH.QUERY" "MotoGP" "MATCH (r:Rider)-[:rides]->(t:Team) WHERE t.name = 'Yamaha' RETURN r.name, r.birth_year" "--compact"
    1633515550.122505 [0 172.17.0.1:55114] "GRAPH.QUERY" "MotoGP" "MATCH (r:Rider) RETURN r.name, r.birth_year, r.height" "--compact"
    1633515550.124045 [0 172.17.0.1:55114] "GRAPH.DELETE" "MotoGP"

    Step 8. Install RedisInsight

Follow this link to install RedisInsight. For this demo, we will be using the RedisInsight Docker container as shown below:

     docker run -d -v redisinsight:/db -p 8001:8001 redislabs/redisinsight:latest

Step 9. Accessing RedisInsight

    Next, point your browser to http://localhost:8001.

Step 10. Run the Graph Query

Run the query below to return each rider's name, birth year, and height:

     GRAPH.QUERY "MotoGP" "MATCH (r:Rider) RETURN r.name, r.birth_year, r.height"
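You can also append Cypher's LIMIT clause to cap the number of records returned. A minimal sketch against the same graph:

 GRAPH.QUERY "MotoGP" "MATCH (r:Rider) RETURN r.name, r.birth_year, r.height LIMIT 2"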


    References

    Redis Launchpad

    Building Movies database app using RedisGraph and NodeJS

    End-of-Life Notice

    Redis is phasing out RedisGraph. This blog post explains the motivation behind this decision and the implications for existing Redis customers and community members.

    End of support is scheduled for January 31, 2025.

    Beginning with Redis Stack 7.2.x-y, Redis Stack will no longer include graph capabilities (RedisGraph).

IMDb (Internet Movie Database) is the world's most popular and authoritative source for information on movies, TV shows and celebrities. This application is an IMDb clone with basic account authentication and movie recommendation functionality. You will learn the power of RedisGraph and NodeJS to build a simple movie database.

    moviedb

    Tech Stack

    • Frontend - React
    • Backend - Node.js, Redis, RedisGraph

    Step 1. Install the pre-requisites

    • Node - v13.14.0+
    • NPM - v7.6.0+

    Step 2. Run Redis Stack Docker container

     docker run -d -p 6379:6379 redis/redis-stack

Ensure that the Docker container is up and running:

     docker ps
    CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
    fd5ef30f025a redis/redis-stack "redis-server --load…" 2 hours ago Up 2 hours 0.0.0.0:6379->6379/tcp nervous_buck

    Step 3. Run RedisInsight Docker container

     docker run -d -v redisinsight:/db -p 8001:8001 redislabs/redisinsight:latest

Ensure that the Docker container is up and running:

     docker ps
    CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
    264db1706dcc redislabs/redisinsight:latest "bash ./docker-entry…" About an hour ago Up About an hour 0.0.0.0:8001->8001/tcp angry_shirley
    fd5ef30f025a redis/redis-stack "redis-server --load…" 2 hours ago Up 2 hours 0.0.0.0:6379->6379/tcp nervous_buck

    Step 4. Clone the repository

     git clone https://github.com/redis-developer/basic-redisgraph-movie-demo-app-nodejs

    Step 5. Setting up environment variables

    Copy .env.sample to .env and add the below details:

      REDIS_ENDPOINT_URL = "Redis server URI"
    REDIS_PASSWORD = "Password to the server"

    Step 6. Install the dependencies

     npm install

    Step 7. Run the backend server

     node app.js

    Step 8. Run the client

     cd client
    yarn install
    yarn start

    Step 9. Accessing the Movie app

Open http://<IP>:3000 (replacing <IP> with your host's address) to access the movie app.

    movieapp

    Step 10. Sign up for a new account

    moviedb

    Enter the details to create a new account:

    movieapp

Step 11. Sign in to the movie app

    movieapp

    Step 12. Rate the movie

    movieapp

Step 13. View the list of rated movies

    movieapp

Step 14. View movies by director in RedisInsight

     GRAPH.QUERY "MovieApp" "MATCH (director:Director {tmdbId: \"4945\"})-[:DIRECTED]->(movie:Movie) RETURN DISTINCT movie,director"

    movieapp

Step 15. Find movies an actor acted in

Run the query below in RedisGraph to find the movies a given actor acted in:

     GRAPH.QUERY "MovieApp" "MATCH (actor:Actor {tmdbId: \"8537\"})-[:ACTED_IN_MOVIE]->(movie:Movie) RETURN DISTINCT movie,actor"

    movieapp

Step 16. Store a user in the database

     CREATE (user:User {id: 32,
    username: "user", password: "hashed_password", api_key: "525d40da10be8ec75480"})
    RETURN user

    movieapp

    Step 17. Find a user by username

     MATCH (user:User {username: "user"}) RETURN user

    movieapp

How it works

The app consumes the data provided by the Express API and presents it through some views to the end user (a rough sketch of one such API route follows the list below), including:

    • Home page
    • Sign-up and Login pages
    • Movie detail page
    • Actor and Director detail page
    • User detail page
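As a rough sketch of what one such API route can look like (the route path, module paths, and wiring are illustrative assumptions, not the app's actual code; getByGenre is the query helper shown later in this article):

 const express = require('express');
// Hypothetical imports: a RedisGraph session wrapper and the query helper
// shown later in this article; the real app's file layout may differ.
const session = require('./graph-session');
const { getByGenre } = require('./movies-dao');

const app = express();

// Endpoint the client could call to list the movies for a genre.
app.get('/api/movies/genre/:id', async (req, res) => {
  const movies = await getByGenre(session, req.params.id);
  res.json(movies);
});

app.listen(3001);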

    Home page

    How it works

    The home page shows the genres and a brief listing of movies associated with them.

    How the data is stored

    Add a new genre:

     create (g:Genre{name:"Adventure"})

    Add a movie:

     create (m:Movie {
    url: "https://themoviedb.org/movie/862",
    id:232,
    languages:["English"],
    title:"Toy Story",
    countries:["USA"],
    budget:30000000,
    duration:81,
    imdbId:"0114709",
    imdbRating:8.3,
    imdbVotes:591836,
    movieId:42,
    plot:"...",
    poster:"https://image.tmd...",
    poster_image:"https://image.tmdb.or...",
    released:"1995-11-22",
    revenue:373554033,
    runtime:$runtime,
    tagline:"A cowboy doll is profoundly t...",
    tmdbId:"8844",
    year:"1995"})

    Set genre to a movie:

     MATCH (g:Genre), (m:Movie)
    WHERE g.name = "Adventure" AND m.title = "Toy Story"
    CREATE (m)-[:IN_GENRE]->(g)

    How the data is accessed

    Get genres:

     MATCH (genre:Genre) RETURN genre

Get movies by genre:

     MATCH (movie:Movie)-[:IN_GENRE]->(genre)
    WHERE toLower(genre.name) = toLower("Film-Noir") OR id(genre) = toInteger("Film-Noir")
    RETURN movie

    Code example: Get movies with genre

 const getByGenre = function (session, genreId) {
  const query = [
    'MATCH (movie:Movie)-[:IN_GENRE]->(genre)',
    'WHERE toLower(genre.name) = toLower($genreId) OR id(genre) = toInteger($genreId)',
    'RETURN movie',
  ].join('\n');

  return session
    .query(query, {
      genreId,
    })
    .then((result) => manyMovies(result));
};

    Sign-up and Login pages

    moviedb moviedb

To be able to rate movies a user needs to be logged in; for that, a basic JWT-based authentication system is implemented, where user details are stored in RedisGraph for persistence.
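As a rough sketch of what that login flow can look like (the helper name, secret source, and expiry are assumptions for illustration, not the app's exact code):

 const jwt = require('jsonwebtoken');
const bcrypt = require('bcrypt');

async function login(session, username, password) {
  // Look the user up in RedisGraph by username (the same query shown below).
  const result = await session.query(
    'MATCH (user:User {username: $username}) RETURN user',
    {username},
  );
  if (!result.hasNext()) throw {message: 'invalid credentials', status: 401};
  const user = result.next().get('user');

  // Compare the supplied password against the stored bcrypt hash.
  if (!(await bcrypt.compare(password, user.password))) {
    throw {message: 'invalid credentials', status: 401};
  }

  // Issue a signed token that the client sends with subsequent requests.
  return jwt.sign({id: user.id, username}, process.env.JWT_SECRET, {expiresIn: '3h'});
}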

    How the data is stored

    Store user in the database:

     CREATE (user:User {id: 32,
    username: "user", password: "hashed_password", api_key: "525d40da10be8ec75480"})
    RETURN user

    How the data is accessed

    Find by user name:

     MATCH (user:User {username: "user"}) RETURN user

    Code Example: Find user

 const me = function (session, apiKey) {
  return session
    .query('MATCH (user:User {api_key: $api_key}) RETURN user', {
      api_key: apiKey,
    })
    .then((foundedUser) => {
      if (!foundedUser.hasNext()) {
        throw {message: 'invalid authorization key', status: 401};
      }
      while (foundedUser.hasNext()) {
        const record = foundedUser.next();
        return new User(record.get('user'));
      }
    });
};

    Movie detail page

    How it works

    On this page a user can rate the film and view the Actors/directors who participated in the production of the film.

    How the data is stored

    Associate actor with a movie:

     MATCH (m:Movie) WHERE m.title="Jumanji" CREATE (a:Actor :Person{
    bio:"Sample...",
    bornIn:"Denver, Colorado, USA",
    imdbId:"0000245",
    name:"Robin Williams",
    poster:"https://image.tmdb.org/t/p/w440_and_...",
    tmdbId:"2157",
    url:"https://themoviedb.org/person/2157"})-[r:ACTED_IN_MOVIE
    {role: "Alan Parrish"}]->(m)

    Associate director with a movie:

     MATCH (m:Movie) WHERE m.title="Dead Presidents" CREATE (d:Director :Person{
    bio: "From Wikipedia, the free e...",
    bornIn: "Detroit, Michigan, USA",
    imdbId: "0400436",
    name: "Albert Hughes",
    tmdbId: "11447",
    url: "https://themoviedb.org/person/11447"})-[r:DIRECTED]->(m)

    How the data is accessed

    Find movie by id with genre, actors and director:

     MATCH (movie:Movie {tmdbId: $movieId})
    OPTIONAL MATCH (movie)<-[my_rated:RATED]-(me:User {id: "e1e3991f-fe81-439e-a507-aa0647bc0b88"})
    OPTIONAL MATCH (movie)<-[r:ACTED_IN_MOVIE]-(a:Actor)
    OPTIONAL MATCH (movie)-[:IN_GENRE]->(genre:Genre)
    OPTIONAL MATCH (movie)<-[:DIRECTED]-(d:Director)
    WITH DISTINCT movie, my_rated, genre, d, a, r
    RETURN DISTINCT movie,
    collect(DISTINCT d) AS directors,
    collect(DISTINCT a) AS actors,
    collect(DISTINCT genre) AS genres

    Code Example: Get movie detail

 const getById = function (session, movieId, userId) {
  if (!userId) throw {message: 'invalid authorization key', status: 401};
  const query = [
    'MATCH (movie:Movie {tmdbId: $movieId})\n' +
      ' OPTIONAL MATCH (movie)<-[my_rated:RATED]-(me:User {id: $userId})\n' +
      ' OPTIONAL MATCH (movie)<-[r:ACTED_IN_MOVIE]-(a:Actor)\n' +
      ' OPTIONAL MATCH (movie)-[:IN_GENRE]->(genre:Genre)\n' +
      ' OPTIONAL MATCH (movie)<-[:DIRECTED]-(d:Director)\n' +
      ' WITH DISTINCT movie, my_rated, genre, d, a, r\n' +
      ' RETURN DISTINCT movie,\n' +
      ' collect(DISTINCT d) AS directors,\n' +
      ' collect(DISTINCT a) AS actors,\n' +
      ' collect(DISTINCT genre) AS genres',
  ].join(' ');
  return session
    .query(query, {
      movieId: movieId.toString(),
      userId: userId.toString(),
    })
    .then((result) => {
      if (result.hasNext()) {
        return _singleMovieWithDetails(result.next());
      }
      throw {message: 'movie not found', status: 404};
    });
};

    Actor and Director detail page

    How it works

    How the data is accessed

    Find movies where actor acted in:

     MATCH (actor:Actor {tmdbId: "8537"})-[:ACTED_IN_MOVIE]->(movie:Movie)
    RETURN DISTINCT movie,actor

    Find movies directed by:

     MATCH (director:Director {tmdbId: "4945"})-[:DIRECTED]->(movie:Movie)
    RETURN DISTINCT movie,director

    Get movies directed by

 const getByDirector = function (session, personId) {
  const query = [
    'MATCH (director:Director {tmdbId: $personId})-[:DIRECTED]->(movie:Movie)',
    'RETURN DISTINCT movie,director',
  ].join('\n');

  return session
    .query(query, {
      personId,
    })
    .then((result) => manyMovies(result));
};

    User detail page

    How it works

Shows the profile info and the movies rated by the user.

    How the data is stored

    Set rating for a movie:

     MATCH (u:User {id: 42}),(m:Movie {tmdbId: 231})
    MERGE (u)-[r:RATED]->(m)
    SET r.rating = "7"
    RETURN m

    How the data is accessed

    Get movies and user ratings:

     MATCH (:User {id: "d6b31131-f203-4d5e-b1ff-d13ebc06934d"})-[rated:RATED]->(movie:Movie)
    RETURN DISTINCT movie, rated.rating as my_rating

    Get rated movies for user

 const getRatedByUser = function (session, userId) {
  return session
    .query(
      'MATCH (:User {id: $userId})-[rated:RATED]->(movie:Movie) \
       RETURN DISTINCT movie, rated.rating as my_rating',
      {userId},
    )
    .then((result) =>
      result._results.map((r) => new Movie(r.get('movie'), r.get('my_rating'))),
    );
};

    Data types:

    • The data is stored in various keys and various relationships.
      • There are 5 types of data
        • User
        • Director
        • Actor
        • Genre
        • Movie

    Each type has its own properties

    • Actor: id, bio, born , bornIn, imdbId, name, poster, tmdbId, url
    • Genre: id, name
    • Director: id, born, bornIn, imdbId, name, tmdbId, url
    • User: id, username, password, api_key
• Movie: id, url, languages, countries, budget, duration, imdbId, imdbRating, imdbVotes, movieId, plot, poster, poster_image, released, revenue, runtime, tagline, tmdbId, year

    And there are 4 types of relationship:

    • User-RATED->Movie
    • Director-DIRECTED->Movie
    • Actor-ACTED_IN_MOVIE->Movie
    • Movie-IN_GENRE->Genre

    References


    Storing and Querying JSON documents using Redis Stack


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    Redis Stack is an extension of Redis that adds modern data models and processing engines to provide a complete developer experience. Redis Stack provides a simple and seamless way to access different data models such as full-text search, document store, graph, time series, and probabilistic data structures enabling developers to build any real-time data application.

    In this tutorial, you will see how Redis Stack can help you in storing and querying JSON documents.

    Step 1. Create a free Cloud account

    Create your free Redis Enterprise Cloud account. Once you click on “Get Started”, you will receive an email with a link to activate your account and complete your signup process.

    tip

    For a limited time, use TIGER200 to get $200 credits on Redis Enterprise Cloud and try all the advanced capabilities!

    🎉 Click here to sign up

    Step 2. Create Your database

    Choose your preferred cloud vendor. Select the region and then click "Let's start free" to create your free database automatically.

    tip

    If you want to create a custom database with your preferred name and type of Redis, click "Create a custom database" option shown in the image.

    create database

    Step 3. Verify the database details

You will be provided with a public endpoint URL and "Redis Stack" as the type of database, with the list of features that come by default.

    verify database

    Step 4. Using RedisInsight

RedisInsight is a visual tool that lets you do both GUI- and CLI-based interactions with your Redis database, and so much more when developing your Redis-based application. It is a fully-featured desktop GUI client that provides capabilities to design, develop, and optimize your Redis application. It works with any cloud provider as long as you run it on a host with network access to your cloud-based Redis server. It makes it easy to discover cloud databases and configure connection details with a single click, and it allows you to automatically add Redis Enterprise Software and Redis Enterprise Cloud databases.

    Follow this link to install RedisInsight v2 on your local system. Assuming that you already have RedisInsight v2 installed on your MacOS, you can browse through the Applications and click "RedisInsight-v2" to bring up the Redis Desktop GUI tool.

    Step 5. Enter Redis Enterprise Cloud details

    Add the Redis Enterprise cloud database endpoint, port and password.

    access redisinsight

    Step 6. Verify the database under RedisInsight dashboard

    database details

    Step 7. Getting Started with Redis JSON

    The following steps use some basic Redis JSON commands. You can run them from the Redis command-line interface (redis-cli) or use the CLI available in RedisInsight.

    To interact with Redis JSON, you will most often use the JSON.SET and JSON.GET commands. Before using Redis JSON, you should familiarize yourself with its commands and syntax as detailed in the documentation: Redis JSON Commands.

    Let’s go ahead and test drive some JSON-specific operations for setting and retrieving a Redis key with a JSON value:

    • Scalar
    • Objects (including nested objects)
    • Arrays of JSON objects
    • JSON nested objects

    Scalar

Under Redis JSON, a key can contain any valid JSON value: a scalar, an object, or an array. A JSON scalar is a single value such as a string or a number. You will have to use the JSON.SET command to set the JSON value. For new Redis keys the path must be the root, so you will use the “.” path in the example below. For existing keys, when the entire path exists, the value that it contains is replaced with the JSON value. Here you will use JSON.SET to set the JSON scalar value “Hello JSON!”:

    Command:
    JSON.SET greetings .  ' "Hello JSON!" '
    Result:
    OK

    Use JSON.GET to return the value at path in JSON serialized form:

    Command:
    JSON.GET greetings
    Result:
    "\"Hello JSON!\""

    Objects

    Let’s look at a JSON object example. A JSON object contains data in the form of a key-value pair. The keys are strings and the values are the JSON types. Keys and values are separated by a colon. Each entry (key-value pair) is separated by a comma. The { (curly brace) represents the JSON object:

{
    "employee": {
        "name": "alpha",
        "age": 40,
        "married": true
    }
}

    Here is the command to insert JSON data into Redis:

    Command:
    JSON.SET employee_profile $ '{ "employee": { "name": "alpha", "age": 40,"married": true }  } '
    note

    Please note that the above command works for 2.0+ release of Redis JSON. If you are using the older version of Redis JSON, you can replace "$" with "."

    Result:
    "OK"

The subcommands below change the reply’s format and are all set to the empty string by default:

• INDENT sets the indentation string for nested levels.
• NEWLINE sets the string that’s printed at the end of each line.
• SPACE sets the string that’s put between a key and a value.

With the defaults, the reply is compact:

    Command:
    JSON.GET employee_profile
    Result:
    "{\"employee\":{\"name\":\"alpha\",\"age\":40,\"married\":true}}"

    Retrieving a part of JSON document

You can also retrieve a part of the JSON document from Redis. In the example below, “.ans” is passed on the command line to retrieve the value 4:

    Command:
    JSON.SET object . '{"foo":"bar", "ans":"4" }'
    Result:
    "OK"
    Command:
    JSON.GET object
    Result:
    "{\"foo\":\"bar\",\"ans\":\"4\"}"
    Command:
    JSON.GET object .ans
    Results:
    "\"4\""

    Retrieving the type of JSON data

    JSON.TYPE reports the type of JSON value at path and path defaults to root if not provided. If the key or path do not exist, null is returned.

    Command:
    JSON.TYPE employee_profile
    Result:
    "Object"

    JSON arrays of objects

    The JSON array represents an ordered list of values. A JSON array can store multiple values, including strings, numbers, or objects. In JSON arrays, values must be separated by a comma. The [ (square bracket) represents the JSON array. Let’s look at a simple JSON array example with four objects:

    {"employees":[
    {"name":"Alpha", "email":"alpha@gmail.com", "age":23},
    {"name":"Beta", "email":"beta@gmail.com", "age":28},
    {"name":"Gamma", "email":"gamma@gmail.com", "age":33},
    {"name":"Theta", "email":"theta@gmail.com", "age":41}
    ]}
    Command:
    JSON.SET testarray .  '{"employees":[         {"name":"Alpha", "email":"alpha@gmail.com", "age":23},         {"name":"Beta", "email":"beta@gmail.com", "age":28},       {"name":"Gamma", "email":"gamma@gmail.com", "age":33},         {"name":"Theta", "email":"theta@gmail.com", "age":41}    ]}   '
    Result:
    "OK"
    Command:
    JSON.GET testarray
    Result:
    "{\"employees\":[{\"name\":\"Alpha\",\"email\":\
    alpha@gmail.com

    \",\"age\":23},{\"name\":\"Beta\",\"email\":\"beta@gmail.com....

    JSON nested objects

    A JSON object can also have another object. Here is a simple example of a JSON object having another object nested in it:

    Command:
    >> JSON.SET employee_info . ' { "firstName": "Alpha",         "lastName": "K", "age": 23,        "address" : {            "streetAddress": "110 Fulbourn Road Cambridge",  "city": "San Francisco", "state": "California", "postalCode": "94016"  } } '
    Command:
    >> JSON.GET employee_info
    Result:
    "{\"firstName\":\"Alpha\",\"lastName\":\"K\",\"age\":23,\"address\":{\"streetAddress\":\"110 Fulbourn Road Cambridge\",\"city\":\"San Francisco\",\"state\":\"California\",\"postalCode\":\"94016\"}}"

    Next Steps

    Redis Launchpad

    Redis JSON Tutorial

The following links provide you with the available options to get started with the Redis JSON feature:

    How to store and query JSON documents using Redis Stack
    How to store JSON documents in Redis with Python
    How to cache JSON data in Redis with NodeJS
    How to cache JSON data in Redis with Ruby
    How to cache JSON data in Redis with Go
    How to cache JSON data in Redis with Java
    How to visualize and edit your JSON data using RedisInsight
    How to store and retrieve Nested JSON document in Redis
    How to index JSON document using Redis
    Basic Redis JSON commands
    Implementing a Shopping Cart using NodeJS & Redis
    How to store and retrieve JSON document using NodeJS

    How to index JSON document using Redis Search


    Indexing JSON document using Redis


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    Redis JSON 2.0 Private Preview was announced for the first time during RedisConf 2021. With this newer version, Redis JSON will fully support JSONPath expressions and Active-Active geo-distribution. The Active-Active implementation is based on Conflict-free Replicated Data-Types (CRDT).

    Prior to v2.2, Redis Search and Query only supported hashes. Going forward, it will support JSON documents. This opens a powerful new set of document-based indexing use cases. In addition, Redis now provides query profiling. This will empower developers to understand and optimize their search queries, increasing their developer experience.

Redis Search and Query has been providing indexing and search capabilities on hashes. Under the hood, Redis JSON 2.0 exposes an internal public API. Internal, because this API is exposed to other features running inside a Redis node. Public, because any feature can consume this API. So does Redis Search 2.2! In addition to indexing hashes, Redis Search also indexes JSON. To index JSON, you must use the Redis JSON feature.

    By exposing its capabilities to other features, Redis JSON gives Redis Search the ability to index JSON documents so users can now find documents by indexing and querying the content. These combined features give you a powerful, low latency, JSON-oriented document database!
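As a quick note on the query profiling mentioned above: once an index exists (see Step 2 below), you can wrap a search in FT.PROFILE to see how it executes. A minimal sketch using the index created later in this guide:

Command:
 FT.PROFILE userIdx SEARCH QUERY '@name:(John)'

The reply contains the usual result set followed by a breakdown of parsing and iterator timings.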

Prerequisites:

    • Redis 6.x or later
    • Redis Search 2.2 or later
    • Redis JSON 2.0 or later

    Step 1. Run the "latest" tagged Redis Stack container

    This Docker image contains Redis together with the main Redis features, including JSON, Search and Query. You'll need the latest tag of the image, which you can access as follows:

     docker run -p 6379:6379 redis/redis-stack:latest
     info modules
    # Modules
    module:name=graph,ver=20406,api=1,filters=0,usedby=[],using=[],options=[]
    module:name=timeseries,ver=10410,api=1,filters=0,usedby=[],using=[],options=[]
    module:name=bf,ver=20205,api=1,filters=0,usedby=[],using=[],options=[]
    module:name=ReJSON,ver=20000,api=1,filters=0,usedby=[search],using=[],options=[]
    module:name=search,ver=20200,api=1,filters=0,usedby=[],using=[ReJSON],options=[]

    Step 2. Create an Index

    Let's start by creating an index.

We can now specify ON JSON to inform Redis that we want to index JSON documents. Then, on the SCHEMA part, you can provide JSONPath expressions. The result of each JSONPath expression is indexed and associated with a logical name (attribute). This attribute (previously called field) is used in the query part.

    This is the basic syntax for indexing a JSON document:

    Syntax:
     FT.CREATE {index_name} ON JSON SCHEMA {json_path} AS {attribute} {type}
    Command:
     FT.CREATE userIdx ON JSON SCHEMA $.user.name AS name TEXT $.user.email AS email  TAG

    Step 3. Populate the database with JSON document

    We should first populate the database with a JSON document using the JSON.SET command. In our example we are going to use the following JSON document:

{
    "user": {
        "name": "Paul John",
        "email": "paul.john@example.com",
        "age": "42",
        "country": "London"
    }
}
JSON.SET myuser $ '{ "user":{"name": "Paul John", "email": "paul.john@example.com", "age": "42", "country": "London" }}'

Because indexing is synchronous, the document will be visible on the index as soon as the JSON.SET command returns. Any subsequent query matching the indexed content will return the document.

    Step 4. Indexing the database with JSON document

    This new version includes a comprehensive support of JSONPath. It is now possible to use all the expressiveness of JSONPath expressions.

    To create a new index, we use the FT.CREATE command. The schema of the index now accepts JSONPath expressions. The result of the expression is indexed and associated with an attribute (here: title).

    FT.CREATE myIdx ON JSON SCHEMA $.title AS title TEXT

    We can now do a search query and find our JSON document using FT.SEARCH:

    Command:
     FT.SEARCH userIdx '@name:(John)'
    Result:
     1) (integer) 1
    2) "myuser"
    3) 1) "$"
    2) "{\"user\":{\"name\":\"Paul John\",\"email\":\"paul.john@example.com\",\"age\":\"4\",\"country\":\"London\"}}"

We just saw that, by default, FT.SEARCH returns the whole document. We can also return only a specific attribute (here, name):

      FT.SEARCH userIdx '@name:(John)' RETURN 1 name
      1) (integer) 1
    2) "myuser"
    3) 1) "name"
    2) "Paul John"

    Step 5. Projecting using JSON Path expressions

The RETURN parameter also accepts a JSONPath expression which lets us extract any part of the JSON document. In this example, we return the result of the JSONPath expression $.user.email.

    Command:
     FT.SEARCH userIdx '@name:(John)' RETURN 1 $.user.email
    Result:
     1) (integer) 1
    2) "myuser"
    3) 1) "$.user.email"
    2) "paul.john@example.com"
    info

It is not possible to index JSON objects and JSON arrays. To be indexed, a JSONPath expression must return a single scalar value (a string or a number). If the JSONPath expression returns an object or an array, it will be ignored.

    Given the following document:

 {
    "name": "Paul John",
    "address": [
        "Orbital Park",
        "Hounslow"
    ],
    "pincode": "TW4 6JS"
}

    If we want to index the array under the address key, we have to create two fields:

    Command:
     FT.CREATE orgIdx ON JSON SCHEMA $.address[0] AS a1 TEXT $.address[1] AS a2 TEXT

    It's time to index the document:

    Command:
     JSON.SET org:1 $ '{ "name": "Home Address", "address": [ "Orbital Park","Hounslow" ], "pincode": "TW4 6JS" }'

    We can now search in the address:

    Command:
     FT.SEARCH orgIdx "Orbital Park"
Result:
 1) (integer) 1
2) "org:1"
3) 1) "$"
2) "{\"name\":\"Home Address\",\"address\":[\"Orbital Park\",\"Hounslow\"],\"pincode\":\"TW4 6JS\"}"

    References


    How to index JSON documents using Redis JSON & Search

    Redis JSON 2.0 Private Preview was announced for the first time during RedisConf 2021. With this newer version, Redis JSON will fully support JSONPath expressions and Active-Active geo-distribution. The Active-Active implementation is based on Conflict-free Replicated Data-Types (CRDT).

    Prior to v2.2, Redis Search only supported Redis hashes. Going forward, Redis Search will support Redis JSON documents. This opens a powerful new set of document-based indexing use cases. In addition, Redis Search now provides query profiling. This will empower developers to understand and optimize their Redis Search queries, increasing their developer experience.

    Redis Search has been providing indexing and search capabilities on hashes. Under the hood, Redis JSON 2.0 exposes an internal public API. Internal, because this API is exposed to other modules running inside a Redis node. Public, because any module can consume this API. So does Redis Search! In addition to indexing Redis hashes, Redis Search also indexes JSON. To index JSON, you must use the Redis JSON module.

    By exposing its capabilities to other modules, Redis JSON gives Redis Search the ability to index JSON documents so users can now find documents by indexing and querying the content. These combined modules give you a powerful, low latency, JSON-oriented document database!

    Prerequisites:

    • Redis 6.x or later
    • Redis Search 2.2 or later
    • Redis JSON 2.0 or later

    Step 1. Run the "latest" tagged Redis Stack container

    This Docker image contains Redis together with the main Redis modules, including Redis Search and Redis JSON. You'll need the latest tag of the image, which you can access as follows:

     docker run -p 6379:6379 redis/redis-stack:latest
     info modules
    # Modules
    module:name=graph,ver=20406,api=1,filters=0,usedby=[],using=[],options=[]
    module:name=timeseries,ver=10410,api=1,filters=0,usedby=[],using=[],options=[]
    module:name=bf,ver=20205,api=1,filters=0,usedby=[],using=[],options=[]
    module:name=ReJSON,ver=20000,api=1,filters=0,usedby=[search],using=[],options=[]
    module:name=search,ver=20200,api=1,filters=0,usedby=[],using=[ReJSON],options=[]

    Step 2. Create an Index

    Let's start by creating an index.

We can now specify ON JSON to inform Redis Search that we want to index JSON documents. Then, on the SCHEMA part, you can provide JSONPath expressions. The result of each JSONPath expression is indexed and associated with a logical name (attribute). This attribute (previously called field) is used in the query part.

    This is the basic syntax for indexing a JSON document:

    Syntax:
     FT.CREATE {index_name} ON JSON SCHEMA {json_path} AS {attribute} {type}
    Command:
     FT.CREATE userIdx ON JSON SCHEMA $.user.name AS name TEXT $.user.email AS email  TAG

    Step 3. Populate the database with JSON document

    We should first populate the database with a JSON document using the JSON.SET command. In our example we are going to use the following JSON document:

{
    "user": {
        "name": "Paul John",
        "email": "paul.john@example.com",
        "age": "42",
        "country": "London"
    }
}
JSON.SET myuser $ '{ "user":{"name": "Paul John", "email": "paul.john@example.com", "age": "42", "country": "London" }}'

Because indexing is synchronous, the document will be visible on the index as soon as the JSON.SET command returns. Any subsequent query matching the indexed content will return the document.

    Step 4. Indexing the database with JSON document

    This new version includes a comprehensive support of JSONPath. It is now possible to use all the expressiveness of JSONPath expressions.

    To create a new index, we use the FT.CREATE command. The schema of the index now accepts JSONPath expressions. The result of the expression is indexed and associated with an attribute (here: title).

    FT.CREATE myIdx ON JSON SCHEMA $.title AS title TEXT

    We can now do a search query and find our JSON document using FT.SEARCH:

    Command:
     FT.SEARCH userIdx '@name:(John)'
    Result:
     1) (integer) 1
    2) "myuser"
    3) 1) "$"
    2) "{\"user\":{\"name\":\"Paul John\",\"email\":\"paul.john@example.com\",\"age\":\"4\",\"country\":\"London\"}}"

We just saw that, by default, FT.SEARCH returns the whole document. We can also return only a specific attribute (here, name):

      FT.SEARCH userIdx '@name:(John)' RETURN 1 name
      1) (integer) 1
    2) "myuser"
    3) 1) "name"
    2) "Paul John"

    Step 5. Projecting using JSON Path expressions

The RETURN parameter also accepts a JSONPath expression which lets us extract any part of the JSON document. In this example, we return the result of the JSONPath expression $.user.email.

    Command:
     FT.SEARCH userIdx '@name:(John)' RETURN 1 $.user.email
    Result:
     1) (integer) 1
    2) "myuser"
    3) 1) "$.user.email"
    2) "paul.john@example.com"
    info

It is not possible to index JSON objects and JSON arrays. For example, the address array in the document below cannot be indexed directly:

 {
    "user": {
        "name": "Paul John",
        "email": "paul.john@example.com",
        "age": "42",
        "country": "London",
        "address": [
            "Orbital Park",
            "Hounslow"
        ],
        "pincode": "TW4 6JS"
    }
}
    Command:
 JSON.SET myuser $ '{ "user": { "name": "Paul John", "email": "paul.john@example.com", "age": "42", "country": "London", "address": [ "Orbital Park","Hounslow" ], "pincode": "TW4 6JS" }}'

    References


    Redis JSON Cheatsheet


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

Command syntax and purpose:

• JSON.GET <key>: Return the value at path in JSON serialized form
• JSON.SET <key> <path> <json> [NX | XX]: Set the JSON value at path in key
• JSON.MGET <key> [key ...] <path>: Return the values at path from multiple keys
• JSON.TYPE <key> [path]: Report the type of JSON value at path
• JSON.NUMINCRBY <key> <path> <number>: Increment the number value stored at path by number
• JSON.NUMMULTBY <key> <path> <number>: Multiply the number value stored at path by number
• JSON.STRAPPEND <key> [path] <json-string>: Append the json-string value(s) to the string at path
• JSON.ARRAPPEND <key> <path> <json> [json ...]: Append the json value(s) into the array at path, after the last element in it
• JSON.STRLEN <key> [path]: Report the length of the JSON String at path in key
• JSON.ARRLEN <key> [path]: Report the length of the JSON Array at path in key
• JSON.ARRINSERT <key> <path> <index> <json> [json ...]: Insert the json value(s) into the array at path before the index (shifts to the right)
• JSON.ARRINDEX <key> <path> <json-scalar> [start [stop]]: Search for the first occurrence of a scalar JSON value in an array
• JSON.ARRPOP <key> [path [index]]: Remove and return the element at index in the array
• JSON.ARRTRIM <key> <path> <start> <stop>: Trim an array so that it contains only the specified inclusive range of elements
• JSON.OBJKEYS <key> [path]: Return the keys in the object that's referenced by path
• JSON.OBJLEN <key> [path]: Report the number of keys in the JSON Object at path in key
• JSON.DEBUG <subcommand & arguments>: Report information for debugging
• JSON.RESP <key> [path]: Return the JSON in key in Redis Serialization Protocol (RESP)
• JSON.DEL <key> [path]: Delete the value at path (JSON.FORGET is an alias for JSON.DEL)
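As a quick, minimal example of a few of these commands together (the key name item is arbitrary):

Command:
 JSON.SET item . '{"name":"pen","qty":3}'
Result:
 "OK"
Command:
 JSON.NUMINCRBY item .qty 2
Result:
 "5"
Command:
 JSON.GET item
Result:
 "{\"name\":\"pen\",\"qty\":5}"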
How to build a Shopping cart app using NodeJS and Redis

The most interesting part, at least for now, is located in the src directory (directory structure is shown below):

The main.js file is the main JavaScript file of the application, which loads all common elements and calls the App.vue main screen. App.vue is a file that contains the HTML, CSS, and JavaScript for a specific page or template. As the entry point for the application, it is shared by all screens by default, so it is a good place to write the notification-client piece. The public/index.html is the static entry point from where the DOM will be loaded.

    Directory Structure:

    % tree
    .
    ├── App.vue
    ├── assets
    │ ├── RedisLabs_Illustration.svg
    │ └── products
    │ ├── 1f1321bb-0542-45d0-9601-2a3d007d5842.jpg
    │ ├── 42860491-9f15-43d4-adeb-0db2cc99174a.jpg
    │ ├── 63a3c635-4505-4588-8457-ed04fbb76511.jpg
    │ ├── 6d6ca89d-fbc2-4fc2-93d0-6ee46ae97345.jpg
    │ ├── 97a19842-db31-4537-9241-5053d7c96239.jpg
    │ ├── e182115a-63d2-42ce-8fe0-5f696ecdfba6.jpg
    │ ├── efe0c7a3-9835-4dfb-87e1-575b7d06701a.jpg
    │ ├── f5384efc-eadb-4d7b-a131-36516269c218.jpg
    │ ├── f9a6d214-1c38-47ab-a61c-c99a59438b12.jpg
    │ └── x341115a-63d2-42ce-8fe0-5f696ecdfca6.jpg
    ├── components
    │ ├── Cart.vue
    │ ├── CartItem.vue
    │ ├── CartList.vue
    │ ├── Info.vue
    │ ├── Product.vue
    │ ├── ProductList.vue
    │ └── ResetDataBtn.vue
    ├── config
    │ └── index.js
    ├── main.js
    ├── plugins
    │ ├── axios.js
    │ └── vuetify.js
    ├── store
    │ ├── index.js
    │ └── modules
    │ ├── cart.js
    │ └── products.js
    └── styles
    └── styles.scss

    8 directories, 27 files

    In the client directory, under the subdirectory src, open the file App.vue. You will see the below content:

<template>
  <v-app>
    <v-container>
      <div class="my-8 d-flex align-center">
        <div class="pa-4 rounded-lg red darken-1">
          <v-icon color="white" size="45">mdi-cart-plus</v-icon>
        </div>
        <h1 class="ml-6 font-weight-regular">Shopping Cart demo</h1>
      </div>
    </v-container>

    <v-container>
      <v-row>
        <v-col cols="12" sm="7" md="8">
          <info />
          <product-list :products="products" />
        </v-col>
        <v-col cols="12" sm="5" md="4" class="d-flex flex-column">
          <cart />
          <reset-data-btn class="mt-6" />
        </v-col>
      </v-row>

      <v-footer class="mt-12 pa-0">
        © Copyright 2021 | All Rights Reserved Redis
      </v-footer>
    </v-container>
  </v-app>
</template>

<script>
import { mapGetters, mapActions } from 'vuex';
import Cart from '@/components/Cart';
import ProductList from '@/components/ProductList';
import ResetDataBtn from '@/components/ResetDataBtn.vue';
import Info from '@/components/Info';

export default {
  name: 'App',

  components: {
    ProductList,
    Cart,
    ResetDataBtn,
    Info
  },

  computed: {
    ...mapGetters({
      products: 'products/getProducts'
    })
  },

  async created() {
    await this.fetchProducts();
  },

  methods: {
    ...mapActions({
      fetchProducts: 'products/fetch'
    })
  }
};
</script>

This is the client-side entry point. When the App component is created, it dispatches the products/fetch Vuex action to load the product catalog from the API, and the mapped products getter feeds the product-list component, which is rendered alongside the cart.

    Running/Testing the web client

    $ cd client
    $ npm run serve

    > redis-shopping-cart-client@1.0.0 serve
    > vue-cli-service serve

    INFO Starting development server...
    98% after emitting CopyPlugin

    DONE Compiled successfully in 7733ms 7:15:56 AM


    App running at:
    - Local: http://localhost:8081/
    - Network: http://192.168.43.81:8081/

    Note that the development build is not optimized.
    To create a production build, run npm run build.

Let us click on the first item, “256GB Pendrive”, and try to check out with this product. Once you add it to the cart, you will see the output below from the redis-cli monitor command:

    1613320256.801562 [0 172.22.0.1:64420] "json.get" "product:97a19842-db31-4537-9241-5053d7c96239"
    1613320256.803062 [0 172.22.0.1:64420] "hget"
    ...
    1613320256.805950 [0 172.22.0.1:64420] "json.set" "product:97a19842-db31-4537-9241-5053d7c96239" "." "{\"id\":\"97a19842-db31-4537-9241-5053d7c96239\",\"name\":\"256BG Pendrive\",\"price\":\"60.00\",\"stock\":1}"
    1613320256.807792 [0 172.22.0.1:64420] "set" "sess:Ii9njXZd6zeUViL3tKJimN5zU7Samfze"
    ...
    1613320256.823055 [0 172.22.0.1:64420] "scan" "0" "MATCH" "product:*"
    ...
    1613320263.232527 [0 172.22.0.1:64420] "hgetall" "cart:bdee1606395f69985e8f8e01d3ada8c4"
    1613320263.233752 [0 172.22.0.1:64420] "set" "sess:gXk5K9bobvrR790-HFEoi3bQ2kP9YmjV" "{\"cookie\":{\"originalMaxAge\":10800000,\"expires\":\"2021-02-14T19:31:03.233Z\",\"httpOnly\":true,\"path\":\"/\"},\"cartId\":\"bdee1606395f69985e8f8e01d3ada8c4\"}" "EX" "10800"
    1613320263.240797 [0 172.22.0.1:64420] "scan" "0" "MATCH" "product:*"
    1613320263.241908 [0 172.22.0.1:64420] "scan" "22" "MATCH" "product:*"

    "{\"cookie\":{\"originalMaxAge\":10800000,\"expires\":\"2021-02-14T19:31:03.254Z\",\"httpOnly\":true,\"path\":\"/\"},\"cartId\":\"4bc231293c5345370f8fab83aff52cf3\"}" "EX" "10800"

    Shopping Cart

    Conclusion

Storing shopping cart data in Redis is a good idea because it lets you retrieve the data very quickly at any time and persist it if needed. Compared with cookies, which store the entire shopping cart in the session and are bloated and relatively slow to work with, storing the shopping cart data in Redis speeds up the cart’s read and write performance, thereby improving the user experience.

    Reference


    Storing and retrieving Nested JSON document


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    JSON(a.k.a JavaScript Object Notation) is a format for sharing data. A JSON object is a key-value data format that is typically rendered in curly braces. When you’re working with JSON, you’ll likely see JSON objects in a .json file, but they can also exist as a JSON object or string within the context of a program.

    Nested JSON is a JSON file with a big portion of its values being other JSON objects. Compared with Simple JSON, Nested JSON provides higher clarity in that it decouples objects into different layers, making it easier to maintain.

    Example of Nested JSON object

employee = {
    'name': "Paul",
    'Age': '25',
    'Location': "USA",
    'Address': {
        "longitude": "-113.6335371",
        "latitude": "37.1049502",
        "postal code": "90266"
    }
}

Follow the steps below to understand how nested JSON objects can be imported into the Redis database:

    Step 1. Run Redis Docker container

     docker run -p 6379:6379 --name redis-redisjson redislabs/rejson:latest

    Step 2. Verify if JSON feature is loaded

     redis-cli
    127.0.0.1:6379> info modules
    # Modules
    module:name=ReJSON,ver=10007,api=1,filters=0,usedby=[],using=[],options=[]
    127.0.0.1:6379>

Step 3. Nested JSON

Below is Python code for storing a nested JSON document:

 import redis
import json

employee = {
    'name': "Paul",
    'Age': '25',
    'Location': "USA",
    'Address': {
        "longitude": "-113.6335371",
        "latitude": "37.1049502",
        "postal code": "90266"
    }
}

r = redis.StrictRedis()
r.execute_command('JSON.SET', 'record', '.', json.dumps(employee))
reply = json.loads(r.execute_command('JSON.GET', 'record'))

    Copy the code and save it in a file called employee.py

Step 4. Install the Redis JSON Python client

     pip  install rejson

Step 5. Execute the Python script

Execute the script below and ensure that it runs successfully.

     python3 employee.py

Step 6. Verify that the JSON object was added to Redis

     redis-cli
    127.0.0.1:6379> JSON.GET record
    "{\"name\":\"Paul\",\"Age\":\"25\",\"Location\":\"USA\",\"Address\":[{\"longitude\":\"-113.6335371\",\"latitude\":\"37.1049502\",\"postal code\":\"90266\"}]}"

Step 7. Fetching a specific field

If you want to fetch a specific field (like Address), the code would look like this:

 import redis
import json

employee = {
    'name': "Paul",
    'Age': '25',
    'Location': "USA",
    'Address': {
        "longitude": "-113.6335371",
        "latitude": "37.1049502",
        "postal code": "90266"
    }
}

r = redis.StrictRedis()
r.execute_command('JSON.SET', 'record', '.', json.dumps(employee))
reply = json.loads(r.execute_command('JSON.GET', 'record', '.Address.longitude'))

    Step 8. Verifying the results

      redis-cli
    127.0.0.1:6379> JSON.GET record .Address.longitude
    "\"-113.6335371\""

    References


    How to store and retrieve JSON documents using Node.js


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis
    Profile picture for Simon Prickett
    Author:
    Simon Prickett, Principal Developer Advocate at Redis

    Imagine that you're building a social network application where users can "check in" at different locations and give them a star rating, say from 0 for an awful experience through 5 to report that they had the best time ever there! When designing your application, you determined that there's a need to manage data about three main entities:

    • Users
    • Locations
    • Checkins

    Let's look at what we're storing about each of these entities. As we're using Redis as our only data store, we'll also consider how they map to Redis data types...

    Users

    We'll represent each user as a flat map of name/value pairs with no nested objects. As we'll see later on, this maps nicely to a Redis Hash. Here's a JSON representation of the schema we'll use to represent each user:

{
  "id": 99,
  "firstName": "Isabella",
  "lastName": "Pedersen",
  "email": "isabella.pedersen@example.com",
  "password": "xxxxxx1",
  "numCheckins": 8073,
  "lastCheckin": 1544372326893,
  "lastSeenAt": 138
}

We've given each user an ID and we're storing basic information about them. Also, we'll hash their password using bcrypt when we load the sample data into Redis.

    For each user, we'll keep track of the total number of checkins that they've submitted to the system, and the timestamp and location ID of their most recent checkin so that we know where and when they last used the system.
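Although the application code for this tutorial is JavaScript, the mapping itself is easy to see in any client. Here's a minimal, hypothetical redis-py sketch of storing such a flat user object as a Redis Hash (the ncc:users:<id> key pattern matches the sample data loaded later; the client setup and values are illustrative):

import redis

r = redis.Redis(host='localhost', port=6379, db=0)

user = {
    'id': 99,
    'firstName': 'Isabella',
    'lastName': 'Pedersen',
    'numCheckins': 8073,
}

# Each name/value pair becomes a field in the hash at ncc:users:99.
r.hset('ncc:users:99', mapping=user)

# Read the whole flat object back in one round trip.
print(r.hgetall('ncc:users:99'))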

    Locations

    For each location that users can check in at, we're going to maintain two types of data. The first of these is also a flat map of name/value pairs, containing summary information about the location:

{
  "id": 138,
  "name": "Stacey's Country Bakehouse",
  "category": "restaurant",
  "location": "-122.195447,37.774636",
  "numCheckins": 170,
  "numStars": 724,
  "averageStars": 4
}

We've given each location an ID and a category; we'll use the category to search for locations by type later on. The "location" field stores the coordinates in longitude, latitude format… this is the opposite of the usual latitude, longitude ordering. We'll see how to use this to perform geospatial searches later when we look at the Redis Search and Query feature.

    For each location, we're also storing the total number of checkins that have been recorded there by all of our users, the total number of stars that those checkins gave the location, and an average star rating per checkin for the location.

    The second type of data that we want to maintain for each location is what we'll call "location details". These take the form of more structured JSON documents with nested objects and arrays. Here's an example for location 138, Stacey's Country Bakehouse:

{
  "id": 138,
  "hours": [
    { "day": "Monday", "hours": "8-7" },
    { "day": "Tuesday", "hours": "9-7" },
    { "day": "Wednesday", "hours": "6-8" },
    { "day": "Thursday", "hours": "6-6" },
    { "day": "Friday", "hours": "9-5" },
    { "day": "Saturday", "hours": "8-9" },
    { "day": "Sunday", "hours": "7-7" }
  ],
  "socials": [
    {
      "instagram": "staceyscountrybakehouse",
      "facebook": "staceyscountrybakehouse",
      "twitter": "staceyscountrybakehouse"
    }
  ],
  "website": "www.staceyscountrybakehouse.com",
  "description": "Lorem ipsum....",
  "phone": "(316) 157-8620"
}

    We want to build an API that allows us to retrieve all or some of these extra details, and keep the overall structure of the document intact. For that, we'll need the Redis JSON feature as we'll see later.

    Checkins

    Checkins differ from users and locations in that they're not entities that we need to store forever. In our application, checkins consist of a user ID, a location ID, a star rating and a timestamp - we'll use these values to update attributes of our users and locations.

    Each checkin can be thought of as a flat map of name/value pairs, for example:

{
  "userId": 789,
  "locationId": 171,
  "starRating": 5
}

    Here, we see that user 789 visited location 171 ("Hair by Parvinder") and was really impressed with the service.

    We need a way to store checkins for long enough to process them, but not forever. We also need to associate a timestamp with each one, as we'll need that when we process the data.

    Redis provides a Stream data type that's perfect for this - with Redis Streams, we can store maps of name/value pairs and have the Redis server timestamp them for us. Streams are also perfect for the sort of asynchronous processing we want to do with this data. When a user posts a new checkin to our API we want to store that data and respond to the user that we've received it as quickly as possible. Later we can have one or more other parts of the system do further processing with it. Such processing might include updating the total number of checkins and last seen at fields for a user, or calculating a new average star rating for a location.
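The checkin receiver and processor components are written in JavaScript, but the stream interaction is easy to sketch in any client. Here's a hedged redis-py illustration (the ncc:checkins key matches the stream used later in this tutorial; the field values are made up):

import redis

r = redis.Redis(host='localhost', port=6379, db=0)

# XADD with the default id of '*' asks the Redis server to assign
# a timestamp-based ID to the new entry.
entry_id = r.xadd('ncc:checkins', {
    'userId': 789,
    'locationId': 171,
    'starRating': 5,
})
print(f'Stored checkin {entry_id}')

# A processor component can later read entries in arrival order.
for stream_id, fields in r.xrange('ncc:checkins', count=5):
    print(stream_id, fields)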

    Application Architecture

    We decided to use Node.js with the Express framework and ioredis client to build the application. Rather than have a monolithic codebase, the application has been split out into four components or services. These are:

    • Authentication Service: Listens on an HTTP port and handles user authentication using Redis as a shared session store that other services can access.
    • Checkin Receiver: Listens on an HTTP port and receives checkins as HTTP POST requests from our users. Each checkin is placed in a Redis Stream for later processing.
    • Checkin Processor: Monitors the checkin Stream in Redis, updating user and location information as it processes each checkin.
    • API Server: Implements the bulk of the application's API endpoints, including those to retrieve information about users and locations from Redis.

    These components fit together like so:

    Application Architecture

    There's also a data loader component, which we'll use to load some initial sample data into the system.

    Step 1. Cloning the repository

     git clone https://github.com/redislabs-training/node-js-crash-course.git
    cd node-js-crash-course
    npm install

    Step 2. Running Redis container

    From the node-js-crash-course directory, start Redis using docker-compose:

     $ docker-compose up -d
    Creating network "node-js-crash-course_default" with the default driver
    Creating rediscrashcourse ... done
    $ docker ps

    The output from the docker ps command should show one container running, using the "redis/redis-stack" image. This container runs Redis with the Search and Query, JSON, Time Series, and Probabilistic data features.

    Step 3. Load the Sample Data into Redis

    Load the course example data using the provided data loader. This is a Node.js application:

    $ npm run load all
    > node src/utils/dataloader.js -- "all"

    Loading user data...
    User data loaded with 0 errors.
    Loading location data...
    Location data loaded with 0 errors.
    Loading location details...
    Location detail data loaded with 0 errors.
    Loading checkin stream entries...
    Loaded 5000 checkin stream entries.
    Creating consumer group...
    Consumer group created.
    Dropping any existing indexes, creating new indexes...
    Created indexes.
    Deleting any previous bloom filter, creating new bloom filter...
    Created bloom filter.

    In another terminal window, run the redis-cli executable that's in the Docker container. Then, enter the Redis commands shown at the redis-cli prompt to verify that data loaded successfully:

    $ docker exec -it rediscrashcourse redis-cli
    127.0.0.1:6379> hgetall ncc:locations:106
    1) "id"
    2) "106"
    3) "name"
    4) "Viva Bubble Tea"
    5) "category"
    6) "cafe"
    7) "location"
    8) "-122.268645,37.764288"
    9) "numCheckins"
    10) "886"
    11) "numStars"
    12) "1073"
    13) "averageStars"
    14) "1"
    127.0.0.1:6379> hgetall ncc:users:12
    1) "id"
    2) "12"
    3) "firstName"
    4) "Franziska"
    5) "lastName"
    6) "Sieben"
    7) "email"
    8) "franziska.sieben@example.com"
    9) "password"
    10) "$2b$05$uV38PUcdFD3Gm6ElMlBkE.lzZutqWVE6R6ro48GsEjcmnioaZZ55C"
    11) "numCheckins"
    12) "8945"
    13) "lastCheckin"
    14) "1490641385511"
    15) "lastSeenAt"
    16) "22"
    127.0.0.1:6379> xlen ncc:checkins
    (integer) 5000

    Step 4. Start and Configure RedisInsight

    If you're using RedisInsight, start it up and it should open in your browser automatically. If not, point your browser at http://localhost:8001.

    If this is your first time using RedisInsight click "I already have a database".

    If you already have other Redis databases configured in RedisInsight, click "Add Redis Database".

    Now, click "Connect to a Redis Database Using hostname and port". Configure the database details as shown below, then click "Add Redis Database".

    Configuring RedisInsight

    You should now be able to browse your Redis instance. If you need more guidance on how to connect to Redis from RedisInsight, check out Justin's video below but be sure to use 127.0.0.1 as the host, 6379 as the port and leave the username and password fields blank when configuring your database.

    Step 5. Start the Application

    Now it's time to start the API Server component of the application and make sure it connects to Redis. This component listens on port 8081.

    If port 8081 is in use on your system, edit this section of the config.json file and pick another available port:

    "application": {
    "port": 8081
    },

    Start the server like this:

    $ npm run dev

    > ./node_modules/nodemon/bin/nodemon.js

    [nodemon] 2.0.7
    [nodemon] to restart at any time, enter `rs`
    [nodemon] watching path(s): *.*
    [nodemon] watching extensions: js,mjs,json
    [nodemon] starting `node src/server.js`
    Warning: Environment variable WEATHER_API_KEY is not set!
    info: Application listening on port 8081.

    This starts the application using nodemon, which monitors for changes in the source code and will restart the server when a change is detected. This will be useful in the next module where you'll be making some code changes.

    Ignore the warning about WEATHER_API_KEY — we'll address this in a later exercise when we look at using Redis as a cache.

    To verify that the server is running correctly and connected to Redis, point your browser at:

    http://localhost:8081/api/location/200

    You should see the summary information for location 200, Katia's Kitchen:

{
  "id": "200",
  "name": "Katia's Kitchen",
  "category": "restaurant",
  "location": "-122.2349598,37.7356811",
  "numCheckins": "359",
  "numStars": "1021",
  "averageStars": "3"
}

    Great! Now you're up and running. Let's move on to the next module and see how we're using Redis Hashes in the application. You'll also get to write some code!

    Step 6. Stopping redis-cli, the Redis Container and the Application

    Don't do this now, as we’ve only just started! However, when you do want to shut everything down, here's how to do it...

    To stop running redis-cli, simply enter the quit command at the redis-cli prompt:

    127.0.0.1:6379> quit
    $

    To stop the Redis Server, make sure you are in the node-js-crash-course folder that you checked the application repo out to, then:

    $ docker-compose down
    Stopping rediscrashcourse ... done
    Removing rediscrashcourse ... done
    Removing network node-js-crash-course_default

    Redis persists data to the "redisdata" folder. If you want to remove this, just delete it:

    $ rm -rf redisdata

    To stop each of the application's components, press Ctrl+C in the terminal window that the component is running in. For example, to stop the API server:

    $ npm run dev

    > ./node_modules/nodemon/bin/nodemon.js

    [nodemon] 2.0.7
    [nodemon] to restart at any time, enter `rs`
    [nodemon] watching path(s): *.*
    [nodemon] watching extensions: js,mjs,json
    [nodemon] starting `node src/server.js`
    info: Application listening on port 8081.
    ^C
    node-js-crash-course $

    We used Redis' built-in Hash data type to represent our user and location entities. Hashes are great for this, but they are limited in that they can only contain flat name/value pairs. For our locations, we want to store extra details in a more structured way.

    Here's an example of the additional data we want to store about a location:

{
  "id": 121,
  "hours": [
    { "day": "Monday", "hours": "6-7" },
    { "day": "Tuesday", "hours": "6-7" },
    { "day": "Wednesday", "hours": "7-8" },
    { "day": "Thursday", "hours": "6-9" },
    { "day": "Friday", "hours": "8-5" },
    { "day": "Saturday", "hours": "9-6" },
    { "day": "Sunday", "hours": "6-4" }
  ],
  "socials": [
    {
      "instagram": "theginclub",
      "facebook": "theginclub",
      "twitter": "theginclub"
    }
  ],
  "website": "www.theginclub.com",
  "description": "Lorem ipsum...",
  "phone": "(318) 251-0608"
}

    We could store this data as serialized JSON in a Redis String, but then our application would have to retrieve and parse the entire document every time it wanted to read some of the data. And we'd have to do the same to update it too. Furthermore, with this approach, update operations aren't atomic and a second client could update the JSON stored at a given key while we're making changes to it in our application code. Then, when we serialize our version of the JSON back into the Redis String, the other client's changes would be lost.

    Redis JSON adds a new JSON data type, and a query syntax for selecting and updating individual elements in a JSON document atomically on the Redis server. This makes our application code simpler, more efficient, and much more reliable.
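For example, changing one field becomes a single atomic, server-side operation. A minimal redis-py sketch, assuming the sample data from earlier is loaded (the new phone number is illustrative):

import redis
from redis.commands.json.path import Path

r = redis.Redis(host='localhost', port=6379, db=0)

# Update just the phone field, atomically, on the server.
# No other part of the document is read, parsed, or rewritten.
r.json().set('ncc:locationdetails:121', Path('.phone'), '(318) 555-0199')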

    Step 7. Final exercise

    In this exercise, you'll complete the code for an API route that gets just the object representing a location's opening hours for a given day. Open the file src/routes/location_routes.js, and find the route for /location/:locationId/hours/:day. The starter code looks like this:

// EXERCISE: Get opening hours for a given day.
router.get(
  '/location/:locationId/hours/:day',
  [
    param('locationId').isInt({ min: 1 }),
    param('day').isInt({ min: 0, max: 6 }),
    apiErrorReporter,
  ],
  async (req, res) => {
    /* eslint-disable no-unused-vars */
    const { locationId, day } = req.params;
    /* eslint-enable */
    const locationDetailsKey = redis.getKeyName('locationdetails', locationId);

    // TODO: Get the opening hours for a given day from
    // the JSON stored at the key held in locationDetailsKey.
    // You will need to provide the correct JSON path to the hours
    // array and return the element held in the position specified by
    // the day variable. Make sure Redis JSON returns only the day
    // requested!
    const jsonPath = 'TODO';

    /* eslint-enable no-unused-vars */
    const hoursForDay = JSON.parse(
      await redisClient.call('JSON.GET', locationDetailsKey, jsonPath),
    );
    /* eslint-disable */

    // If null response, return empty object.
    res.status(200).json(hoursForDay || {});
  },
);

    You'll need to update the code to provide the correct JSON path, replacing the "TODO" value with a JSON path expression.

    Looking at the JSON stored at key ncc:locationdetails:121, we see that the opening hours are stored as an array of objects in a field named hours, where day 0 is Monday and day 6 is Sunday:

    Location Details in RedisInsight

    So you'll need a JSON path query that gets the right element from the hours array depending on the value stored in the variable day.

    If you're using redis-cli, you can look at the structure of the JSON document with the following command:

    json.get ncc:locationdetails:121 .

    Make sure your query returns only the day requested, so that you don't have to write Node.js code to filter the value returned from Redis. Use the Redis JSON path syntax page to help you formulate the right query.
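As a syntax hint, JSON path expressions can index into arrays with square brackets. Here's an illustration on a hypothetical scratch key, not the exercise key:

127.0.0.1:6379> json.set scratch . '{"items":["a","b","c"]}'
OK
127.0.0.1:6379> json.get scratch '.items[1]'
"\"b\""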

    To test your code, start the server with:

    $ npm run dev

    Recall that this will allow you to edit the code and try your changes without restarting the server.

If you have the correct JSON path in your code, visiting http://localhost:8081/api/location/121/hours/2 should return:

{
  "day": "Wednesday",
  "hours": "7-8"
}

    External Resources

diff --git a/howtos/redisjson/using-dotnet/index.html b/howtos/redisjson/using-dotnet/index.html

    Importing JSON data into Redis using .Net

diff --git a/howtos/redisjson/using-go/index.html b/howtos/redisjson/using-go/index.html

    How to cache JSON data in Redis with Go


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

Go-ReJSON is a Go client for the Redis JSON feature. It supports multiple underlying Redis clients, such as the print-like Redis API client redigo and the type-safe Redis client go-redis.

Follow the steps below to get started with Redis JSON using a Go client.

Step 1. Initialize a Go module

     go mod init github.com/my/repo

    Step 2. Install Go-redis

     go get github.com/go-redis/redis/v8

    Step 3. Install Go client for Redis JSON

     go get github.com/nitishm/go-rejson/v4

    Step 4. Clone the repository

     git clone https://github.com/nitishm/go-rejson
cd go-rejson/examples

    Step 5. Build the Go package

    Command:
     go build json_set/json_set.go
    Result:
     go: downloading github.com/go-redis/redis/v8 v8.4.4
    go: downloading github.com/gomodule/redigo v1.8.3
    go: downloading go.opentelemetry.io/otel v0.15.0
    go build: build output "json_set" already exists and is a directory

    Step 6. Run the Go program

    Command:
     go run json_set/json_set.go
    Result:
     Executing Example_JSONSET for Redigo Client
    Success: OK
    Student read from redis : main.Student{Name:main.Name{First:"Mark", Middle:"S", Last:"Pronto"}, Rank:1}

    Executing Example_JSONSET for Redigo Client
    Success: OK
    Student read from redis : main.Student{Name:main.Name{First:"Mark", Middle:"S", Last:"Pronto"}, Rank:1}
    Command:
     pwd
    go-rejson/examples
    Command:
     go run json_array/json_array.go
    Result:
     Executing Example_JSONSET for Redigo Client
    arr: OK
    arr before pop: [one two three four five]
    Length: 5
    Deleted element: five
    arr after pop: [one two three four]
    Length: 4
    Index of "one": 0
    Out of range: -1
    "ten" not found: -1
    no. of elements left: 2
    arr after trimming to [1,2]: [two three]
    no. of elements: 3
    arr after inserting "one": [one two three]

    Executing Example_JSONSET for Redigo Client
    arr: OK
    arr before pop: [one two three four five]
    Length: 5
    Deleted element: five
    arr after pop: [one two three four]
    Length: 4
    Index of "one": 0
    Out of range: -1
    "ten" not found: -1
    no. of elements left: 2
    arr after trimming to [1,2]: [two three]
    no. of elements: 3
    arr after inserting "one": [one two three]

    References

diff --git a/howtos/redisjson/using-java/index.html b/howtos/redisjson/using-java/index.html

Modeling JSON Documents with Redis and Java

…documents whose key starts with "student:".
• Then we actually create the index, called "student-index", by calling ftCreate().

Schema schema = new Schema()
    .addTextField("$.firstName", 1.0)
    .addTextField("$.lastName", 1.0);
IndexDefinition rule = new IndexDefinition(IndexDefinition.Type.JSON)
    .setPrefixes(new String[]{"student:"});
client.ftCreate("student-index",
    IndexOptions.defaultOptions().setDefinition(rule),
    schema);

    With an index now defined, we can query our JSON. Let's find all students whose name begins with "maya":

Query q = new Query("@\\$\\.firstName:maya*");
SearchResult mayaSearch = client.ftSearch("student-index", q);

    We can then iterate over our search results:

List<Document> docs = mayaSearch.getDocuments();
for (Document doc : docs) {
    System.out.println(doc);
}

    This example just scratches the surface. You can atomically manipulate JSON documents and query them in a variety of ways. See the Redis JSON docs, the Redis Search and Query docs, and our course, "Querying, Indexing, and Full-text Search in Redis", for a lot more examples.

diff --git a/howtos/redisjson/using-nodejs/index.html b/howtos/redisjson/using-nodejs/index.html

    How to cache JSON data in Redis with Node.js


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis
    Profile picture for Simon Prickett
    Author:
    Simon Prickett, Principal Developer Advocate at Redis

    Node.js has become incredibly popular for both web and mobile application development. Node.js can be installed on MacOS, Linux and Windows systems. The Node Package Manager (npm) enables developers to install packages which are tried and tested libraries that help you to build applications quickly.

    Node.js is a fast runtime, but adding the power, speed and flexibility of Redis can take it to the next level. Redis is best suited to situations that require data to be retrieved and delivered to the client as quickly as possible.

Redis JSON is an add-on feature that adds JSON as a native data type to Redis. It enables atomic, in-place operations to be performed on JSON documents stored in Redis.

    We'll use the node-redis client to connect to Redis and leverage the power of Redis JSON.

    Step 1. Run the Redis Stack Docker Container

    This simple container image bundles together the latest stable releases of Redis and select Redis features from Redis, Inc.

    $ docker run -d -p 6379:6379 redis/redis-stack:latest

    Step 2. Install Node.js

    Download and install the current LTS (Long Term Support) version of Node.js from the nodejs.org website.

    Step 3. Initialize an npm Project

    Run npm init to initialize a new project. Use the default answers to all the questions:

    $ mkdir jsondemo
    $ cd jsondemo
    $ npm init

    Now edit package.json and add the line "type": "module". The file should look something like this:

{
  "name": "jsondemo",
  "type": "module",
  "version": "1.0.0",
  "description": "",
  "main": "index.js",
  "scripts": {
    "test": "echo \"Error: no test specified\" && exit 1"
  },
  "author": "",
  "license": "ISC"
}

    Step 4. Install node-redis

    node-redis is a high performance Node.js Redis client with support for the Redis JSON feature. Install it using npm:

    $ npm install redis

    Step 5. Create a JavaScript File

    Copy the code below into a file called app.js:

import { createClient } from 'redis';

async function redisJSONDemo() {
  try {
    const TEST_KEY = 'test_node';

    const client = createClient();
    await client.connect();

    // Redis JSON uses JSON Path syntax. '.' is the root.
    await client.json.set(TEST_KEY, '.', { node: 4303 });
    const value = await client.json.get(TEST_KEY, {
      // JSON Path: .node = the element called 'node' at root level.
      path: '.node',
    });

    console.log(`value of node: ${value}`);

    await client.quit();
  } catch (e) {
    console.error(e);
  }
}

redisJSONDemo();

    Step 6. Run the Application

    Start the application as follows:

    $ node app.js

    You should see this output:

    value of node: 4303

    Using the Redis MONITOR command, you can see the Redis commands that node-redis sent to the Redis server while running the application:

    $ redis-cli
    127.0.0.1:6379> monitor
    OK

    1637866932.281949 [0 127.0.0.1:61925] "JSON.SET" "test_node" "." "{\"node\":4303}"
    1637866932.282842 [0 127.0.0.1:61925] "JSON.GET" "test_node" ".node"

    References

diff --git a/howtos/redisjson/using-python/index.html b/howtos/redisjson/using-python/index.html

    How to store JSON documents in Redis with Python

    Redis JSON is a source-available Redis feature that lets you store, manipulate, and query JSON documents in Redis. The standard Redis Python client (v4.0 or greater) supports all of the features of Redis JSON, and in this tutorial, we'll see how to get started with them.

    Step 1. Create a free Cloud account

    Create your free Redis Enterprise Cloud account. Once you click on “Get Started”, you will receive an email with a link to activate your account and complete your signup process.

    tip

    For a limited time, use TIGER200 to get $200 credits on Redis Enterprise Cloud and try all the advanced capabilities!

    🎉 Click here to sign up

    Step 2. Create Your database

    Choose your preferred cloud vendor. Select the region and then click "Let's start free" to create your free database automatically.

    tip

    If you want to create a custom database with your preferred name and type of redis, click "Create a custom database" option shown in the image.

    create database

    Step 3. Verify the database details

    You will be provided with Public endpoint URL and "Redis Stack" as the type of database with the list of features that comes by default.

    verify database

    Step 4. Using RedisInsight

    RedisInsight is a visual tool that lets you do both GUI- and CLI-based interactions with your Redis database, and so much more when developing your Redis based application. It is a fully-featured pure Desktop GUI client that provides capabilities to design, develop and optimize your Redis application. It works with any cloud provider as long as you run it on a host with network access to your cloud-based Redis server. It makes it easy to discover cloud databases and configure connection details with a single click. It allows you to automatically add Redis Enterprise Software and Redis Enterprise Cloud databases.

    Follow this link to install RedisInsight v2 on your local system. Assuming that you already have RedisInsight v2 installed on your MacOS, you can browse through the Applications and click "RedisInsight-v2" to bring up the Redis Desktop GUI tool.

    Step 5. Add Redis database

    access redisinsight

    Step 6. Enter Redis Enterprise Cloud details

    Add the Redis Enterprise cloud database endpoint, port and password.

    access redisinsight

    Step 7. Verify the database under RedisInsight dashboard

    database details

    Storing JSON in Redis

    Let's consider a simple JSON document structure representing a user:

{
  "name": "Jane",
  "age": 33,
  "location": "Chawton"
}

Installing the Redis Python client

     $ pip3 install redis
    Collecting redis
    Downloading redis-4.2.0-py3-none-any.whl (225 kB)
    Collecting async-timeout>=4.0.2
    Downloading async_timeout-4.0.2-py3-none-any.whl (5.8 kB)
    Collecting typing-extensions
    Downloading typing_extensions-4.1.1-py3-none-any.whl (26 kB)
    ..
    Requirement already satisfied: packaging>=20.4 in /usr/lib/python3.8/site-packages (from redis) (20.4)
    Collecting wrapt<2,>=1.10
    Installing collected packages: async-timeout, typing-extensions, wrapt, deprecated, redis
    Running setup.py install for wrapt ... done
    Successfully installed async-timeout-4.0.2 deprecated-1.2.13 redis-4.2.0 typing-extensions-4.1.1 wrapt-1.14.0

Here's the Python code to store a JSON document in Redis:

import redis
from redis.commands.json.path import Path

client = redis.Redis(host='localhost', port=6379, db=0)

jane = {
    'name': "Jane",
    'Age': 33,
    'Location': "Chawton"
}

client.json().set('person:1', '$', jane)

result = client.json().get('person:1')
print(result)

    In the code above, we first connect to Redis and store a reference to the connection in the client variable.

    Next, we create a Python dictionary to represent a person object.

And finally, we store the object in Redis using the json().set() method. The first argument, person:1, is the name of the key that will reference the JSON. The second argument is a JSON path. We use $, as this is a new object. Finally, we pass in the Python dictionary, which will be serialized to JSON.

    To retrieve the JSON object, we run json().get(), passing in the key. The result is a Python dictionary representing the JSON object stored in Redis.

    Run the code

    If you copy the code above into a file called main.py, you can run the code like so:

$ python3 main.py

    Verify that the JSON document has been added to Redis

    Start redis-cli to connect to your Redis instance. Then run the following command:

    localhost:6379> json.get person:1
    "{\"name\":\"Jane\",\"Age\":33,\"Location\":\"Chawton\"}"

    Fetching specific fields from a JSON document

    You can use Redis to fetch specific fields from a JSON document by specifying a path. For example, here's how to return only the name field:

    name = client.json().get('person:1', Path('.name'))
    print(name)

    This code will print the string "Jane".

    You can execute the same query from the command line:

    localhost:6379> json.get person:1 '.name'
    "\"Jane\""

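You can also update a single field atomically on the server, without fetching and rewriting the whole document. A small sketch continuing the example above (the new value is illustrative):

import redis
from redis.commands.json.path import Path

client = redis.Redis(host='localhost', port=6379, db=0)

# Change just the Age field, in place, on the Redis server.
client.json().set('person:1', Path('.Age'), 34)
print(client.json().get('person:1', Path('.Age')))  # 34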
    References

diff --git a/howtos/redisjson/using-redisinsight/index.html b/howtos/redisjson/using-redisinsight/index.html

    How to visualize JSON data using RedisInsight


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    RedisInsight provides built-in support for the Redis JSON, Search and Query, Streams, and Time Series features to make it even easier to query, visualize, and interactively manipulate search indexes, streams, and time-series data. Support for JSON on Redis Cluster was introduced for the first time in RedisInsight v1.8.0. With RedisInsight, you can visualize and edit your JSON data flawlessly.

The steps below show how to get started with Redis JSON using RedisInsight:

    Step 1. Create a free Cloud account

    Create your free Redis Enterprise Cloud account. Once you click on “Get Started”, you will receive an email with a link to activate your account and complete your signup process.

    tip

    For a limited time, use TIGER200 to get $200 credits on Redis Enterprise Cloud and try all the advanced capabilities!

    🎉 Click here to sign up

    Step 2. Create Your database

    Choose your preferred cloud vendor. Select the region and then click "Let's start free" to create your free database automatically.

    tip

    If you want to create a custom database with your preferred name and type of redis, click "Create a custom database" option shown in the image.

    create database

    Step 3. Verify the database details

    You will be provided with Public endpoint URL and "Redis Stack" as the type of database with the list of features that comes by default.

    verify database

    Step 4. Using RedisInsight

    RedisInsight is a visual tool that lets you do both GUI- and CLI-based interactions with your Redis database, and so much more when developing your Redis based application. It is a fully-featured pure Desktop GUI client that provides capabilities to design, develop and optimize your Redis application. It works with any cloud provider as long as you run it on a host with network access to your cloud-based Redis server. It makes it easy to discover cloud databases and configure connection details with a single click. It allows you to automatically add Redis Enterprise Software and Redis Enterprise Cloud databases.

    Follow this link to install RedisInsight v2 on your local system. Assuming that you already have RedisInsight v2 installed on your MacOS, you can browse through the Applications and click "RedisInsight-v2" to bring up the Redis Desktop GUI tool.

    Step 5. Add Redis database

    access redisinsight

    Step 6. Enter Redis Enterprise Cloud details

    Add the Redis Enterprise cloud database endpoint, port and password.

    access redisinsight

    Step 7. Verify the database under RedisInsight dashboard

    database details

    Step 8. Execute JSON queries

     JSON.SET employee_profile . '{ "employee":  { "name": "carol", "age": 40, "married": true } }'

    My Image

Step 9. Accessing the RedisInsight Browser Tool

    Select "employee_profile" to display the JSON data

    My Image

Step 10. Add a new key

    My Image

Step 11. Expand the JSON field

    My Image

    References

diff --git a/howtos/redisjson/using-ruby/index.html b/howtos/redisjson/using-ruby/index.html

    How to cache JSON data in Redis with Ruby


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    rejson-rb is a package that allows storing, updating and querying objects as JSON documents in a Redis database that is extended with the JSON feature. The package extends redis-rb's interface with Redis JSON's API, and performs on-the-fly serialization/deserialization of objects to/from JSON.

    Step 1. Run Redis Stack docker container

     docker run -d -p 6379:6379 redis/redis-stack:latest

    Step 2. Install Ruby

     brew install ruby

    Step 3. Install Redis JSON Gem

     gem install rejson-rb

    Step 4. Create a ruby file

Copy the content below into a file called 'employee.rb'.

require 'rejson'

rcl = Redis.new # Get a redis client

# Get/Set/Delete keys
obj = {
  'id': "42",
  'name': "Paul John",
  'email': "paul.john@gmail.com",
  'address': {
    'city': 'London'
  }
}

rcl.json_set("employee", Rejson::Path.root_path, obj)

rcl.json_set("employee", Rejson::Path.new(".id"), 43)

rcl.json_get "employee", Rejson::Path.root_path

rcl.json_del "employee", ".address.city"

The script above uses Redis JSON commands to set the object, alter the id to 43, and then delete the address.city field using 'json_del'.

    Step 5. Execute the script

     ruby employee.rb

    You can verify what's happening in the background by running the monitor command in Redis CLI shell:

     127.0.0.1:6379> monitor
    OK
    1627619198.040000 [0 172.17.0.1:57550] "JSON.SET" "employee" "." "{\"id\":\"42\",\"name\":\"Paul John\",\"email\":\"paul.john@gmail.com\",\"address\":{\"city\":\"London\"}}"
    1627619198.040876 [0 172.17.0.1:57550] "JSON.SET" "employee" ".id" "43"
    1627619198.042132 [0 172.17.0.1:57550] "JSON.GET" "employee" "."
    1627619198.042741 [0 172.17.0.1:57550] "JSON.DEL" "employee" ".address.city"

    References

diff --git a/howtos/redistimeseries/getting-started/index.html b/howtos/redistimeseries/getting-started/index.html

    Storing and Querying Time Series data using Redis Stack


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

Time Series is a Redis feature developed by Redis Inc. to enhance your experience managing time-series data with Redis. It simplifies the use of Redis for time-series use cases such as internet of things (IoT) data, stock prices, and telemetry. With Time Series, you can ingest and query millions of samples and events at the speed of Redis. Advanced tooling such as downsampling and aggregation ensures a small memory footprint without impacting performance. Use a variety of queries for visualization and monitoring with built-in connectors to popular monitoring tools like Grafana, Prometheus, and Telegraf.

    Step 1. Create a free Cloud account

    Create your free Redis Enterprise Cloud account. Once you click on “Get Started”, you will receive an email with a link to activate your account and complete your signup process.

    tip

    For a limited time, use TIGER200 to get $200 credits on Redis Enterprise Cloud and try all the advanced capabilities!

    🎉 Click here to sign up

    Step 2. Create Your database

    Choose your preferred cloud vendor. Select the region and then click "Let's start free" to create your free database automatically.

    tip

    If you want to create a custom database with your preferred name and type of Redis, click "Create a custom database" option shown in the image.

    create database

    Step 3. Verify the database details

    You will be provided with Public endpoint URL and "Redis Stack" as the type of database with the list of features that comes by default.

    verify database

    Step 4. Install RedisInsight

    RedisInsight is a visual tool that lets you do both GUI- and CLI-based interactions with your Redis database, and so much more when developing your Redis based application. It is a fully-featured pure Desktop GUI client that provides capabilities to design, develop and optimize your Redis application. It works with any cloud provider as long as you run it on a host with network access to your cloud-based Redis server. It makes it easy to discover cloud databases and configure connection details with a single click. It allows you to automatically add Redis Enterprise Software and Redis Enterprise Cloud databases.

You can install Redis Stack on your local system to get the RedisInsight GUI tool up and running. Ensure that you have the brew package manager installed on your Mac.

     brew tap redis-stack/redis-stack
    brew install --cask redis-stack
      ==> Installing Cask redis-stack-redisinsight
    ==> Moving App 'RedisInsight-preview.app' to '/Applications/RedisInsight-preview.app'
    🍺 redis-stack-redisinsight was successfully installed!
    ==> Installing Cask redis-stack
    🍺 redis-stack was successfully installed!

    Go to Applications and click "RedisInsight-v2" to bring up the Redis Desktop GUI tool.

    Step 5. Add Redis database

    access redisinsight

    Step 6. Enter Redis Enterprise Cloud details

    Add the Redis Enterprise cloud database endpoint, port and password.

    access redisinsight

    Step 7. Verify the database under RedisInsight dashboard

    database details

    Step 8. Getting Started with Redis Time Series feature

    This section will walk you through using some basic Time Series commands in Redis. You can run them from the Redis command-line interface (redis-cli) or use the CLI available in RedisInsight. (See part 2 of this tutorial to learn more about using the RedisInsight CLI.) Using a basic air-quality dataset, we will show you how to:

    • Create a new time series
    • Add a new sample to the list of series
    • Query a range across one or multiple time series

    Time Series

    Create a new time series

    Let’s create a time series representing air quality dataset measurements. To interact with Time Series you will most often use the TS.RANGE command, but here you will create a time series per measurement using the TS.CREATE command. Once created, all the measurements will be sent using TS.ADD.

The commands below create three time series:

    >> TS.CREATE ts:carbon_monoxide
    >> TS.CREATE ts:relative_humidity
    >> TS.CREATE ts:temperature RETENTION 60 LABELS sensor_id 2 area_id 32

In the above example, ts:carbon_monoxide, ts:relative_humidity and ts:temperature are key names. For ts:temperature, we are creating a time series with two labels (sensor_id and area_id are the fields with values 2 and 32 respectively) and a retention window of 60 milliseconds.

    Add a new sample data to the time series

Now let's add samples to these keys (TS.ADD automatically creates a key if it doesn't already exist):

    >> TS.ADD ts:carbon_monoxide 1112596200 2.4
    >> TS.ADD ts:relative_humidity 1112596200 18.3
    >> TS.ADD ts:temperature 1112599800 28.3
    >> TS.ADD ts:carbon_monoxide 1112599800 2.1
    >> TS.ADD ts:relative_humidity 1112599800 13.5
    >> TS.ADD ts:temperature 1112603400 28.5
    >> TS.ADD ts:carbon_monoxide 1112603400 2.2
    >> TS.ADD ts:relative_humidity 1112603400 13.1
    >> TS.ADD ts:temperature 1112607000 28.7

    Querying the sample

    Now that you have sample data in your time series, you’re ready to ask questions such as:

    “How do I get the last sample?”

    TS.GET is used to get the last sample. The returned array will contain the last sample timestamp followed by the last sample value, when the time series contains data:

    >> TS.GET ts:temperature
    1) (integer) 1112607000
    2) "28.7"

    “How do I get the last sample matching the specific filter?”

    TS.MGET is used to get the last samples matching the specific filter:

    >> TS.MGET FILTER area_id=32
    1) 1) "ts:temperature"
    2) (empty list or set)
    3) 1) (integer) 1112607000
    2) "28.7"

    “How do I get the sample with labels matching the specific filter?”

    >> TS.MGET WITHLABELS FILTER area_id=32
    1) 1) "ts:temperature"
    2) 1) 1) "sensor_id"
    2) "2"
    2) 1) "area_id"
    2) "32"
    3) 1) (integer) 1112607000
    2) "28.7"

    Query a range across one or more time series

TS.RANGE is used to query a range in the forward direction, while TS.REVRANGE is used to query a range in the reverse direction. They let you answer questions such as:

    “How do I get the sample for a time range?”

    >> TS.RANGE ts:carbon_monoxide 1112596200 1112603400
    1) 1) (integer) 1112596200
    2) "2.4"
    2) 1) (integer) 1112599800
    2) "2.1"
    3) 1) (integer) 1112603400
    2) "2.2"

    Aggregation

You can use various aggregation types such as avg, sum, min, max, range, count, first, and last. The example below shows how to use the “avg” aggregation type to answer questions such as:

    “How do I get the sample for a time range on some aggregation rule?”

    >> TS.RANGE ts:carbon_monoxide 1112596200 1112603400 AGGREGATION avg 2
    1) 1) (integer) 1112596200
    2) "2.4"
    2) 1) (integer) 1112599800
    2) "2.1"
    3) 1) (integer) 1112603400
    2) "2.2"

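If you'd rather drive these commands from code, the same flow can be sketched with the ts() interface in redis-py (this assumes redis-py 4.x, a Redis Stack server, and that the key doesn't already exist; key names and samples match the CLI examples above):

import redis

r = redis.Redis(host='localhost', port=6379, db=0)

# TS.CREATE and TS.ADD, mirroring the CLI examples above.
r.ts().create('ts:carbon_monoxide')
r.ts().add('ts:carbon_monoxide', 1112596200, 2.4)
r.ts().add('ts:carbon_monoxide', 1112599800, 2.1)
r.ts().add('ts:carbon_monoxide', 1112603400, 2.2)

# TS.RANGE, plain and with an avg aggregation (bucket size in ms).
print(r.ts().range('ts:carbon_monoxide', 1112596200, 1112603400))
print(r.ts().range('ts:carbon_monoxide', 1112596200, 1112603400,
                   aggregation_type='avg', bucket_size_msec=2))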
    Next Steps

    Redis Launchpad
diff --git a/howtos/redistimeseries/index.html b/howtos/redistimeseries/index.html

    Time Series in Redis Tutorial

The following links provide you with the available options to get started with Time Series in Redis:

    Storing and querying time series data
    How to collect and process time-series data using Redis and Go
    How to collect and process time-series data using Redis and Python
    Using Redis Time Series with Prometheus and Grafana
diff --git a/howtos/redistimeseries/using-dotnet/index.html b/howtos/redistimeseries/using-dotnet/index.html

    Processing Time Series data with Redis and .NET

    Time Series data can be used to measure anything from remote sensor readings to stock market feeds. Working with time series data in .NET is a snap with Redis and NRedisTimeSeries. In this tutorial, we'll explore how to use them together.

    Create your Project

    Start out by creating a project with the command:

    dotnet new console -n TimeSeriesDemoApp

    Next, inside the TimeSeriesDemoApp directory, run the command:

    dotnet add package NRedisTimeSeries

    Get a Redis Database

    The next step is to get a Redis database up and running. The easiest way to do that for development purposes is to use Docker:

docker run -p 6379:6379 redis/redis-stack-server:latest

If you are well past getting started and want to get something into production, your best bet is to run it in Redis Enterprise.

    Connecting to Redis

Open the Program.cs file. In it, create a new ConnectionMultiplexer using a connection string (which will vary based on the deployment you're using). For our basic Docker setup, you'll just run:

    var muxer = ConnectionMultiplexer.Connect("localhost");
    var db = muxer.GetDatabase();

    Create a Time Series

Now that you've gotten a handle to Redis, your next step is to initialize a time series. This will be a bit of a toy example: we'll create a time series called sensor, set its retention period to 1 minute, and give it an id label of sensor-1:

    await db.TimeSeriesCreateAsync("sensor", 60000, new List<TimeSeriesLabel>{new TimeSeriesLabel("id", "sensor-1")});

    Producer Task

Next, we'll create a task that runs a producer in the background. Every second it will send a random integer between 0 and 49 into our time series.

var producerTask = Task.Run(async () =>
{
    while (true)
    {
        await db.TimeSeriesAddAsync("sensor", "*", Random.Shared.Next(50));
        await Task.Delay(1000);
    }
});

    Consumer Task

    With the Producer created, we'll create a consumer loop that will do the opposite. Every second it will pull the most recent item in the time series off and print it out.

var consumerTask = Task.Run(async () =>
{
    while (true)
    {
        await Task.Delay(1000);
        var result = await db.TimeSeriesGetAsync("sensor");
        Console.WriteLine($"{result.Time.Value}: {result.Val}");
    }
});

await Task.WhenAll(producerTask, consumerTask);

    Run the App

Now that we produce and consume data, run the app with dotnet run. You'll see a continuous loop as the app continually produces and consumes data points.

    Run Aggregations in the Time Series

    Now what we've done so far is produce a time series of random integer data for our .NET app to consume. What if we wanted to do something a bit more interesting with it, though? Let's say we wanted to calculate a moving average every 5 seconds. We can do that with ease using Time Series.

    Create Rules to Store Aggregations

    Let's run min, max, and average every 5 seconds on our Time Series. Redis will do this passively in the background after we set up some keys to store them in and set up the rules.

var aggregations = new TsAggregation[] { TsAggregation.Avg, TsAggregation.Min, TsAggregation.Max };
foreach (var agg in aggregations)
{
    await db.TimeSeriesCreateAsync($"sensor:{agg}", 60000,
        new List<TimeSeriesLabel> { new("type", agg.ToString()), new("aggregation-for", "sensor-1") });
    await db.TimeSeriesCreateRuleAsync("sensor", new TimeSeriesRule($"sensor:{agg}", 5000, agg));
}

    Process Results from Aggregations

    With the rules established, we can consume the relevant time series to get the results. When we were creating the time series for our aggregations, we added a label to all of them: new TimeSeriesLabel("aggregation-for", "sensor-1"). We essentially told Redis that this time series would be an aggregation for sensor-1. We can then use that label to find just the time series aggregations of sensor-1. With this in mind, we can grab all the sensor aggregations in one command to Redis using MGET.

var aggregationConsumerTask = Task.Run(async () =>
{
    while (true)
    {
        await Task.Delay(5000);
        var results = await db.TimeSeriesMGetAsync(new List<string>() { "aggregation-for=sensor-1" }, true);
        foreach (var result in results)
        {
            Console.WriteLine($"{result.labels.First(x => x.Key == "type").Value}: {result.value.Val}");
        }
    }
});

With all this set up, you can now just update the Task.WhenAll call at the end to include the new consumer task:

    await Task.WhenAll(producerTask, consumerTask, aggregationConsumerTask);

When you run the application with dotnet run, you will see that it also prints out the average, min, and max for the last 5 seconds of the time series, in addition to the regular ticks.

    Resources

    • The Source Code for this demo is located in GitHub
    • The source code for NRedisTimeSeries is also located in GitHub
    • More information about Time Series with Redis can be found at redistimeseries.io
diff --git a/howtos/redistimeseries/using-go/index.html b/howtos/redistimeseries/using-go/index.html

    How to collect and process time-series data using Redis and Go


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    My Image

Time Series is a Redis feature that allows Redis to be used as a fast in-memory time-series database designed to collect, manage, and deliver time-series data at scale. The Time Series feature shares the performance and simplicity of Redis. Under the hood, it uses efficient data structures such as a radix tree to index data by timestamp, which makes it extremely fast and efficient to run time-aggregate queries.

    Time Series with Redis and Go Client

redistimeseries-go is a package that gives developers easy access to the Time Series feature in Redis. This Go client for Time Series is based on redigo's Client and ConnPool, following the work of dvirsky and mnunberg on https://github.com/RediSearch/redisearch-go

Follow the steps below to get started with the Time Series feature of Redis in Go:

    Step 1. Create free Redis Enterprise Cloud account

    Create your free Redis Enterprise Cloud account. Once you click on “Get Started”, you will receive an email with a link to activate your account and complete your signup process.

    alt_text

    alt_text

    Step 2. Create Your subscription

    Next, you will have to create a Redis Enterprise Cloud subscription. In the Redis Enterprise Cloud menu, click "Create your Subscription".

    alt_text

    Step 3. Select the right Subscription Plan

    Select "Fixed Plan" for low throughout application as for now.

    alt_text

    Step 4. Select cloud vendor

For the cloud provider, select your preferred cloud (for demo purposes).

    alt_text

    Step 5. Click "Create Subscription"

    Finally, click on the "Create Subscription" button.

    alt_text

    You can now verify the subscription as shown below:

    alt_text

    Step 6. Create database

    Click "Create Database". Enter database name and your preferred feature.

    alt_text

Step 7. Installing the Time Series Go client for Redis

    $ go get github.com/RedisTimeSeries/redistimeseries-go

    Step 8. Writing the Go program


package main

import (
    "fmt"

    redistimeseries "github.com/RedisTimeSeries/redistimeseries-go"
)

func main() {
    // Connect to the Redis Enterprise Cloud endpoint with its password
    var client = redistimeseries.NewClient("redis-XXXX.c264.ap-south-1-1.ec2.cloud.redislabs.com:port", "add your password here", nil)
    var keyname = "mytest"
    _, haveit := client.Info(keyname)
    if haveit != nil {
        client.CreateKeyWithOptions(keyname, redistimeseries.DefaultCreateOptions)
        client.CreateKeyWithOptions(keyname+"_avg", redistimeseries.DefaultCreateOptions)
        client.CreateRule(keyname, redistimeseries.AvgAggregation, 60, keyname+"_avg")
    }
    // Add sample with timestamp from server time and value 100
    // TS.ADD mytest * 100
    _, err := client.AddAutoTs(keyname, 100)
    if err != nil {
        fmt.Println("Error:", err)
    }
}

    Step 9. Run the Go program

     go run test.go

    Step 10. Monitor the Redis database

    monitor
    OK
    1635490098.157530 [0 52.149.144.189:48430] "TS.INFO" "mytest"
    1635490098.353530 [0 52.149.144.189:48430] "TS.CREATE" "mytest"
    1635490098.553530 [0 52.149.144.189:48430] "TS.CREATE" "mytest_avg"
    1635490098.753530 [0 52.149.144.189:48430] "TS.CREATERULE" "mytest" "mytest_avg" "AGGREGATION" "AVG" "60"
    1635490098.949529 [0 52.149.144.189:48430] "TS.ADD" "mytest" "*" "100"

    References

diff --git a/howtos/redistimeseries/using-prometheus/index.html b/howtos/redistimeseries/using-prometheus/index.html

    How to monitor Redis with Prometheus and Grafana for Real-Time Analytics


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    My Image

Time-series data is basically a series of data stored in time order and produced continuously over a long period of time. These measurements and events are tracked, monitored, downsampled, and aggregated over time. The events could be, for example, IoT sensor data. Every sensor is a source of time-series data. Each data point in the series stores the source information and other sensor measurements as labels. Data labels from every source may not conform to the same structure or order.

    A time-series database is a database system designed to store and retrieve such data for each point in time. Timestamped data can include data generated at regular intervals as well as data generated at unpredictable intervals.

    When do you use a time-series database?

    • When your application needs data that accumulates quickly and your other databases aren’t designed to handle that scale.
    • For financial or industrial applications.
    • When your application needs to perform real-time analysis of billions of records.
    • When your application needs to perform online queries at millisecond timescales, and support CPU-efficient ad-hoc queries.

    Challenges with the existing traditional databases

    You might find numerous solutions that still store time-series data in a relational database, but they’re quite inefficient and come with their own set of drawbacks. A typical time-series database is usually built to only manage time-series data, hence one of the challenges it faces is with use cases that involve some sort of computation on top of time-series data. One good example could be capturing a live video feed in a time-series database. If you want to run an AI model for face recognition, you would have to extract the time-series data, apply some sort of data transformation and then do computation. Relational databases carry the overhead of locking and synchronization that aren’t required for the immutable time-series data. This results in slower-than-required performance for both ingest and queries. When scaling out, it also means investing in additional compute resources. These databases enforce a rigid structure for labels and can’t accommodate unstructured data. They also require scheduled jobs for cleaning up old data. Beyond the time-series use case, these databases are also used for other use cases, which means overuse of running time-series queries may affect other workloads.

    What is Time Series with Redis?

    Redis has a purpose-built time-series feature that addresses the needs of handling time-series data. It removes the limitations enforced by relational databases and enables you to collect, manage, and deliver time-series data at scale. As an in-memory database, Redis can ingest over 500,000 records per second on a standard node. Our benchmarks show that you can ingest over 11.5 million records per second with a cluster of 16 Redis shards.

Time Series with Redis is resource-efficient. You can add rules to compact data by downsampling: for example, if you've collected more than one billion data points in a day, you could aggregate the data by every minute, thereby reducing the dataset size to 1,440 data points (24 * 60 = 1,440). You can also set data retention policies and expire data by time when you no longer need it. The Time Series feature allows you to aggregate data by average, minimum, maximum, sum, count, range, first, and last. You can run over 100,000 aggregation queries per second with sub-millisecond latency, and you can also perform reverse lookups on the labels in a specific time range.
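
As a rough sketch, such a compaction rule looks like this with redis-py (assuming a local Redis instance with the time series capability; the key names are hypothetical):

import redis

r = redis.Redis()  # assumes Redis with time series support on localhost:6379

# Keep raw samples for one day (retention is in milliseconds)
r.ts().create('sensor:raw', retention_msecs=24 * 60 * 60 * 1000)
# Compacted series holding one average per one-minute bucket
r.ts().create('sensor:raw_avg')
r.ts().createrule('sensor:raw', 'sensor:raw_avg', 'avg', 60 * 1000)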

Notable features of Time Series with Redis include:

• High-volume inserts, low-latency reads
• Query by start time and end time
• Aggregated queries (min, max, avg, sum, range, count, first, last, std.p, std.s, var.p, var.s) for any time bucket
• Configurable maximum retention period
• Downsampling/compaction - automatically updated aggregate time series
• Secondary indexing - each time series has labels (field-value pairs) that allow querying by label

    Why Prometheus?

Prometheus is an open-source systems monitoring and alerting toolkit. It collects and stores its metrics as time-series data: numeric measurements recorded over time, stored with the timestamp at which they were recorded, alongside optional key-value pairs called labels. Metrics play an important role in understanding why your application is behaving a certain way.

    Prometheus remote storage adapter for Time Series Redis feature

In the Redis Time Series organization you can find projects that help you integrate the Time Series Redis feature with other tools, including Prometheus and Grafana. The Prometheus remote storage adapter for Time Series with Redis is hosted at https://github.com/RedisTimeSeries/prometheus-redistimeseries-adapter. It's a read/write adapter that lets Prometheus use Redis as a backend database: the adapter receives Prometheus metrics via the remote write protocol and writes them to Redis.
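
In practice, wiring Prometheus to the adapter comes down to pointing remote_write (and optionally remote_read) at the adapter's listen address. A fragment of prometheus.yml might look like the following sketch (the /write and /read paths are assumptions based on the usual remote storage adapter convention, not taken from this repo's config):

remote_write:
  - url: "http://adapter:9201/write"
remote_read:
  - url: "http://adapter:9201/read"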

    Getting Started

    Prerequisite:

• Install Git
    • Install Docker
    • Install Docker Compose

    Step 1. Clone the repository

     git clone https://github.com/RedisTimeSeries/prometheus-redistimeseries-adapter

    Step 2. Examining the Docker Compose File

This Docker Compose file defines four services:

    1. Prometheus
    2. Adapter
    3. Grafana
    4. Redis
version: '3'
services:
  prometheus:
    image: "prom/prometheus:v2.8.0"
    command: ["--config.file=/prometheus.yml"]
    volumes:
      - ./prometheus.yaml:/prometheus.yml
    ports:
      - 9090:9090
  adapter:
    image: "redislabs/prometheus-redistimeseries-adapter:master"
    command: ["-redis-address", "redis:6379", "-web.listen-address", "0.0.0.0:9201"]
  redis:
    image: "redislabs/redistimeseries:edge"
    ports:
      - "6379:6379"
  grafana:
    build: ./grafana/
    ports:
      - "3000:3000"

    Prometheus

The prometheus service uses the "prom/prometheus" image pulled from Docker Hub. It binds port 9090 on the host to the container's exposed port, and the Prometheus configuration file is made available by mounting it from the host into the container.

    Storage Adapter

The adapter service uses the "redislabs/prometheus-redistimeseries-adapter:master" image pulled from Docker Hub. Its command points the adapter at redis:6379 and makes it listen on 0.0.0.0:9201.

    Redis

The redis service directly uses the "redislabs/redistimeseries:edge" image pulled from Docker Hub. It then binds the container and the host machine to the exposed port, 6379.

    Grafana

    The grafana service uses an image that’s built from the Dockerfile in the current directory. It then binds the container and the host machine to the exposed port, 3000.

    Step 3. Run the Docker Compose

Change directory to compose and run the following command:

     docker-compose up -d
     ajeetraina@Ajeets-MacBook-Pro compose % docker-compose ps
    NAME COMMAND SERVICE STATUS PORTS
    compose-adapter-1 "/adapter/redis-ts-a…" adapter running
    compose-grafana-1 "/run.sh" grafana running 0.0.0.0:3000->3000/tcp
    compose-prometheus-1 "/bin/prometheus --c…" prometheus running 0.0.0.0:9090->9090/tcp
    compose-redis-1 "docker-entrypoint.s…" redis running 0.0.0.0:6379->6379/tcp

Step 4. Accessing Grafana

Open http://hostIP:3000 to access the Grafana dashboard. The default username and password are admin/admin.

    Step 5. Add Prometheus Data Source

In the left sidebar, you will see the “Configuration” option. Select “Data Sources” and choose Prometheus.

    alt_text

    Click “Save and Test”.

Step 6. Importing Prometheus Dashboards

    Click on “Import” for all the Prometheus dashboards.

    alt_text

    Step 7. Adding Redis Datasource

    Again, click on “Data Sources” and add Redis.

    alt_text

    Click "Import".

    alt_text

    Step 8. Running the Sensor Script

It’s time to test drive a few demo scripts built by the Redis team. To start, clone the repository below:

     git clone https://github.com/RedisTimeSeries/prometheus-demos

    This repo contains a set of basic demos showcasing the integration of Time Series Redis feature with Prometheus and Grafana. Let’s pick up a sensor script.

     python3 weather_station/sensors.py

    This script will add random measurements for temperature and humidity for a number of sensors.
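
Under the hood, such a script boils down to periodic TS.ADD calls. A rough Python sketch of the idea (with hypothetical key and label names, not the exact code from the repo) looks like this:

import random
import time

import redis

r = redis.Redis()

while True:  # stop with Ctrl-C
    for sensor_id in range(3):
        r.ts().add(f'temperature:{sensor_id}', '*', 15 + 10 * random.random(),
                   labels={'type': 'temperature', 'sensor': str(sensor_id)})
        r.ts().add(f'humidity:{sensor_id}', '*', 40 + 20 * random.random(),
                   labels={'type': 'humidity', 'sensor': str(sensor_id)})
    time.sleep(1)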

    Go to “Add Panel” on the top right corner of the Grafana dashboard and start adding temperature and humidity values.

    alt_text

    Step 9. Accessing Prometheus Dashboard

Open http://HOSTIP:9090 to access the Prometheus dashboard for the sensor values without any further configuration.

    alt_text

    Further References:

    Redis Launchpad

    How to collect and process time-series data using Redis and Python


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    Time Series

Time-series data is a series of data stored in time order (for example, stock performance over time). Industries today are collecting and analyzing time-based data more than ever before. Traditional databases that rely on relational or document data models are designed neither for storing and indexing data based on time, nor for running time-bucketed aggregation queries. Time-series databases fill this void by providing a data model that optimizes data indexing and querying by time.

Time Series is a Redis feature that allows Redis to be used as a fast in-memory time-series database designed to collect, manage, and deliver time-series data at scale. The Time Series feature shares the performance and simplicity aspects of Redis. Under the hood, it uses efficient data structures such as a radix tree to index data by timestamp, which makes it extremely fast and efficient to run time-aggregate queries.

    Python Client for Time Series with Redis

    tip

As of redis-py 4.0.0, the redistimeseries-py library is deprecated. Its features have been merged into redis-py. Install redis-py from PyPI or from the repository.

    Follow the steps below to get started with Time Series with Redis in Python:

    Step 1. Create a free Cloud account

    Create your free Redis Enterprise Cloud account. Once you click on “Get Started”, you will receive an email with a link to activate your account and complete your signup process.

    tip

    For a limited time, use TIGER200 to get $200 credits on Redis Enterprise Cloud and try all the advanced capabilities!

    🎉 Click here to sign up

    Step 2. Create Your database

    Choose your preferred cloud vendor. Select the region and then click "Let's start free" to create your free database automatically.

    tip

If you want to create a custom database with your preferred name and type of Redis database, click the "Create a custom database" option shown in the image.

    create database

    Step 3. Verify the database details

You will be provided with the public endpoint URL and "Redis Stack" as the database type, along with the list of features that come by default.

    verify database

Step 4. Installation

    $ pip install redis

    Step 5. Create a script file

import redis

r = redis.Redis(host='redis-18386.c110.qa.us-east-1-1.ec2.qa-cloud.redislabs.com', port=<add port>, password=<add password>)
r.ts().create(2, retention_msecs=5)

Save the above file as "ts.py".

    Step 6. Executing the python script

     python3 ts.py

    Step 7. Monitor the Redis database

      1648389303.557366 [0 20.127.62.215:59768] "TS.CREATE" "2"
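
If you want to go a bit further, the same client can add samples and read them back. Here is a minimal sketch (reusing the connection r from ts.py; the key name is hypothetical):

ts = r.ts()
ts.create('sensor:temp')
ts.add('sensor:temp', '*', 21.5)  # '*' lets the server assign the timestamp
print(ts.range('sensor:temp', '-', '+'))  # all samples as (timestamp, value) pairs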

    References

    Redis Launchpad

    How to Use SSL/TLS With Redis Enterprise

    Header

In this article, you will see how to secure your Redis databases using SSL (Secure Sockets Layer). In a production environment, it is always recommended to use SSL to protect the data that moves between various computers (client applications and Redis servers). Transport Layer Security (TLS) guarantees that only allowed applications/computers are connected to the database, and also that data is not viewed or altered by a man in the middle.

    You can secure the connections between your client applications and Redis cluster using:

• One-Way SSL: the client (your application) gets the certificate from the server (Redis cluster), validates it, and then all communications are encrypted
• Two-Way SSL (aka mutual SSL): both the client and the server authenticate each other and validate that both ends are trusted.

In this article, we will focus on two-way SSL, using Redis Enterprise.

    Prerequisites

• A Redis Enterprise 6.0.x database (mine is protected by the password secretdb01 and listening on port 12000)
    • redis-cli to run basic commands
    • Python, Node, and Java installed if you want to test various languages.

    Simple Test

    Step 1. Run a Redis server

You can run the Redis server either in a Docker container or directly on your machine. Use the following commands to set up a Redis server locally on macOS:

     brew tap redis-stack/redis-stack
    brew install --cask redis-stack
    INFO

Redis Stack unifies and simplifies the developer experience of the leading Redis modules and the capabilities they provide. Redis Stack supports the following in addition to Redis: JSON, Search, Time Series, Triggers and Functions, and probabilistic data structures. Learn more

    Let's make sure that the database is available:

    redis-cli -p 12000 -a secretdb01 INFO SERVER

    This should print the Server information.

    Step 2. Get the Certificate from Redis Cluster

Assuming that you have access to the Redis Enterprise Cluster, you need to access the nodes to retrieve the certificate (which is a self-generated one by default).

    The cluster certificate is located at: /etc/opt/redislabs/proxy_cert.pem.

Next, copy the cluster certificate to each client machine; note that once that is done you can use this certificate to connect using "One-Way SSL", but that is not the focus of this article.

    In this tutorial, we will be using Docker to copy the certificate.

    docker cp redis-node1:/etc/opt/redislabs/proxy_cert.pem ./certificates

    Step 3. Generate a New Client Certificate

    Using the Two-Way SSL, you need to have a certificate for the client that will be used by Redis database proxy to trust the client. In this tutorial, we will use a self-signed certificate using OpenSSL. We will be creating a certificate for an application named app_001. Please note that you can create as many certificates as you want, or reuse this one for all servers/applications.

    Open a terminal and run the following commands:


    openssl req \
    -nodes \
    -newkey rsa:2048 \
    -keyout client_key_app_001.pem \
    -x509 \
    -days 36500 \
    -out client_cert_app_001.pem

This command generates a new client key (client_key_app_001.pem) and certificate (client_cert_app_001.pem) with no passphrase.
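
Optionally, you can inspect the generated certificate to check its subject and expiry:

openssl x509 -in client_cert_app_001.pem -text -noout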

    Step 4. Configure the Redis Database

    The next step is to take the certificate and add it to the database you want to protect.

    Let's copy the certificate and paste it into the Redis Enterprise Web Console.

    Copy the certificate in your clipboard:

    Mac:

    pbcopy < client_cert_app_001.pem

    Linux:

     xclip -sel clip < client_cert_app_001.pem

    Windows:

    clip < client_cert_app_001.pem

    Go to the Redis Enterprise Admin Web Console and enable TLS on your database:

    1. Edit the database configuration
    2. Check TLS
    3. Select "Require TLS for All communications"
    4. Check "Enforce client authentication"
    5. Paste the certificate in the text area
    6. Click the Save button to save the certificate
    7. Click the Update button to save the configuration.

    Security Configuration

    The database is now protected, and it is mandatory to use the SSL certificate to connect to it.

    redis-cli -p 12000 -a secretdb01 INFO SERVER
    (error) ERR unencrypted connection is prohibited

    Step 5. Connect to the Database using the Certificate

    In all these examples, you will be using a "self-signed" certificate, so that you don't need to check the validity of the hostname. You should adapt the connections/TLS information based on your certificate configuration.

    Step 5.1 Using Redis-CLI

To connect to an SSL-protected database using redis-cli, you have to use stunnel.

    Create a stunnel.conf file with the following content:

    cert = /path_to/certificates/client_cert_app_001.pem
    key = /path_to/certificates/client_key_app_001.pem
    cafile = /path_to/certificates/proxy_cert.pem
    client = yes

    [redislabs]
    accept = 127.0.0.1:6380
    connect = 127.0.0.1:12000

    Start stunnel using the command

    stunnel ./stunnel.conf

This will start a process that listens on port 6380 and acts as a proxy to the Redis Enterprise database on port 12000.

    redis-cli -p 6380 -a secretdb01 INFO SERVER

    Step 5.2 Using Python

    Using Python, you have to set the SSL connection parameters:

#!/usr/local/bin/python3

import redis
import pprint

try:
    r = redis.StrictRedis(
        password='secretdb01',
        decode_responses=True,
        host='localhost',
        port=12000,
        ssl=True,
        ssl_keyfile='./client_key_app_001.pem',
        ssl_certfile='./client_cert_app_001.pem',
        ssl_cert_reqs='required',
        ssl_ca_certs='./proxy_cert.pem',
    )

    info = r.info()
    pprint.pprint(info)

except Exception as err:
    print("Error connecting to Redis: {}".format(err))

    More information in the documentation "Using Redis with Python".

    Step 5.3 Using Node.JS

    For Node Redis, use the TLS library to configure the client connection:

var redis = require('redis');
var tls = require('tls');
var fs = require('fs');

var ssl = {
  key: fs.readFileSync('../certificates/client_key_app_001.pem', {
    encoding: 'ascii',
  }),
  cert: fs.readFileSync('../certificates/client_cert_app_001.pem', {
    encoding: 'ascii',
  }),
  ca: [fs.readFileSync('../certificates/proxy_cert.pem', {encoding: 'ascii'})],
  // Return null to skip hostname verification (we use a self-signed certificate)
  checkServerIdentity: () => {
    return null;
  },
};

var client = redis.createClient(12000, '127.0.0.1', {
  password: 'secretdb01',
  tls: ssl,
});

client.info('SERVER', function (err, reply) {
  console.log(reply);
});

    More information in the documentation "Using Redis with Node.js".

    Step 5.4 Using Java

    In Java, to be able to connect using SSL, you have to install all the certificates in the Java environment using the keytool utility.

    Create a keystore file that stores the key and certificate you have created earlier:

    openssl pkcs12 -export \
    -in ./client_cert_app_001.pem \
    -inkey ./client_key_app_001.pem \
    -out client-keystore.p12 \
    -name "APP_01_P12"

As you can see, the keystore is used to store the credentials associated with your client; it will be used later with the javax.net.ssl.keyStore system property in the Java application.

In addition to the keystore, you also have to create a trust store, which is used to store other credentials, in our case the Redis cluster certificate.

    Create a trust store file and add the Redis cluster certificate to it

keytool -genkey \
    -dname "cn=CLIENT_APP_01" \
    -alias truststorekey \
    -keyalg RSA \
    -keystore ./client-truststore.p12 \
    -keypass secret \
    -storepass secret \
    -storetype pkcs12

keytool -import \
    -keystore ./client-truststore.p12 \
    -file ./proxy_cert.pem \
    -alias redis-cluster-crt

The truststore will be used later with the javax.net.ssl.trustStore system property in the Java application.

    You can now run the Java application with the following environment variables:

    java -Djavax.net.ssl.keyStore=/path_to/certificates/java/client-keystore.p12 \
    -Djavax.net.ssl.keyStorePassword=secret \
    -Djavax.net.ssl.trustStore=/path_to/certificates/java/client-truststore.p12 \
    -Djavax.net.ssl.trustStorePassword=secret \
    -jar MyApp.jar

For this example and simplicity, I will hard-code these properties in the Java code itself:


import redis.clients.jedis.Jedis;
import java.net.URI;

public class SSLTest {

    public static void main(String[] args) {

        System.setProperty("javax.net.ssl.keyStore", "/path_to/certificates/client-keystore.p12");
        System.setProperty("javax.net.ssl.keyStorePassword", "secret");

        System.setProperty("javax.net.ssl.trustStore", "/path_to/certificates/client-truststore.p12");
        System.setProperty("javax.net.ssl.trustStorePassword", "secret");

        URI uri = URI.create("rediss://127.0.0.1:12000");

        Jedis jedis = new Jedis(uri);
        jedis.auth("secretdb01");

        System.out.println(jedis.info("SERVER"));
        jedis.close();
    }
}
• The System.setProperty calls point the JVM at the keystore and trust store (in a real application this should be externalized)
• The Redis URL starts with rediss (with two s's) to indicate that the connection should be encrypted
• jedis.auth sets the database password

    More information in the documentation "Using Redis with Java".

    Conclusion

    In this article, you learnt how to:

    • Retrieve the Redis Server certificate
    • Generate a client certificate
• Protect your database to enforce transport layer security (TLS) with two-way authentication
    • Connect to the database from redis-cli, Python, Node and Java

    References

How to build a Shopping cart app using NodeJS and Redis

The most interesting part, at least for now, is located in the src directory (directory structure is shown below):

The main.js file is the main JavaScript file of the application, which loads all common elements and calls the App.vue main screen. App.vue is a file that contains the HTML, CSS, and JavaScript for a specific page or template. As the entry point of the application, it is shared by all screens by default, so it is a good place to put the notification-client piece. The public/index.html is the static entry point from where the DOM will be loaded.

    Directory Structure:

    % tree
    .
    ├── App.vue
    ├── assets
    │ ├── RedisLabs_Illustration.svg
    │ └── products
    │ ├── 1f1321bb-0542-45d0-9601-2a3d007d5842.jpg
    │ ├── 42860491-9f15-43d4-adeb-0db2cc99174a.jpg
    │ ├── 63a3c635-4505-4588-8457-ed04fbb76511.jpg
    │ ├── 6d6ca89d-fbc2-4fc2-93d0-6ee46ae97345.jpg
    │ ├── 97a19842-db31-4537-9241-5053d7c96239.jpg
    │ ├── e182115a-63d2-42ce-8fe0-5f696ecdfba6.jpg
    │ ├── efe0c7a3-9835-4dfb-87e1-575b7d06701a.jpg
    │ ├── f5384efc-eadb-4d7b-a131-36516269c218.jpg
    │ ├── f9a6d214-1c38-47ab-a61c-c99a59438b12.jpg
    │ └── x341115a-63d2-42ce-8fe0-5f696ecdfca6.jpg
    ├── components
    │ ├── Cart.vue
    │ ├── CartItem.vue
    │ ├── CartList.vue
    │ ├── Info.vue
    │ ├── Product.vue
    │ ├── ProductList.vue
    │ └── ResetDataBtn.vue
    ├── config
    │ └── index.js
    ├── main.js
    ├── plugins
    │ ├── axios.js
    │ └── vuetify.js
    ├── store
    │ ├── index.js
    │ └── modules
    │ ├── cart.js
    │ └── products.js
    └── styles
    └── styles.scss

    8 directories, 27 files

    In the client directory, under the subdirectory src, open the file App.vue. You will see the below content:

<template>
  <v-app>
    <v-container>
      <div class="my-8 d-flex align-center">
        <div class="pa-4 rounded-lg red darken-1">
          <v-icon color="white" size="45">mdi-cart-plus</v-icon>
        </div>
        <h1 class="ml-6 font-weight-regular">Shopping Cart demo</h1>
      </div>
    </v-container>

    <v-container>
      <v-row>
        <v-col cols="12" sm="7" md="8">
          <info />
          <product-list :products="products" />
        </v-col>
        <v-col cols="12" sm="5" md="4" class="d-flex flex-column">
          <cart />
          <reset-data-btn class="mt-6" />
        </v-col>
      </v-row>

      <v-footer class="mt-12 pa-0">
        © Copyright 2021 | All Rights Reserved Redis
      </v-footer>
    </v-container>
  </v-app>
</template>

<script>
import { mapGetters, mapActions } from 'vuex';
import Cart from '@/components/Cart';
import ProductList from '@/components/ProductList';
import ResetDataBtn from '@/components/ResetDataBtn.vue';
import Info from '@/components/Info';

export default {
  name: 'App',

  components: {
    ProductList,
    Cart,
    ResetDataBtn,
    Info,
  },

  computed: {
    ...mapGetters({
      products: 'products/getProducts',
    }),
  },

  async created() {
    await this.fetchProducts();
  },

  methods: {
    ...mapActions({
      fetchProducts: 'products/fetch',
    }),
  },
};
</script>

This is the client-side code. When the App component is created, it dispatches the products/fetch Vuex action to load the product list from the backend API, and the template renders the product list alongside the cart.

    Running/Testing the web client

    $ cd client
    $ npm run serve

    > redis-shopping-cart-client@1.0.0 serve
    > vue-cli-service serve

    INFO Starting development server...
    98% after emitting CopyPlugin

    DONE Compiled successfully in 7733ms 7:15:56 AM


    App running at:
    - Local: http://localhost:8081/
    - Network: http://192.168.43.81:8081/

    Note that the development build is not optimized.
    To create a production build, run npm run build.

Let us click on the first item, “256GB Pendrive”, and try to check out this product. Once you add it to the cart, you will see the output below from the redis-cli monitor command:

    1613320256.801562 [0 172.22.0.1:64420] "json.get" "product:97a19842-db31-4537-9241-5053d7c96239"
    1613320256.803062 [0 172.22.0.1:64420] "hget"
    ...
    1613320256.805950 [0 172.22.0.1:64420] "json.set" "product:97a19842-db31-4537-9241-5053d7c96239" "." "{\"id\":\"97a19842-db31-4537-9241-5053d7c96239\",\"name\":\"256BG Pendrive\",\"price\":\"60.00\",\"stock\":1}"
    1613320256.807792 [0 172.22.0.1:64420] "set" "sess:Ii9njXZd6zeUViL3tKJimN5zU7Samfze"
    ...
    1613320256.823055 [0 172.22.0.1:64420] "scan" "0" "MATCH" "product:*"
    ...
    1613320263.232527 [0 172.22.0.1:64420] "hgetall" "cart:bdee1606395f69985e8f8e01d3ada8c4"
    1613320263.233752 [0 172.22.0.1:64420] "set" "sess:gXk5K9bobvrR790-HFEoi3bQ2kP9YmjV" "{\"cookie\":{\"originalMaxAge\":10800000,\"expires\":\"2021-02-14T19:31:03.233Z\",\"httpOnly\":true,\"path\":\"/\"},\"cartId\":\"bdee1606395f69985e8f8e01d3ada8c4\"}" "EX" "10800"
    1613320263.240797 [0 172.22.0.1:64420] "scan" "0" "MATCH" "product:*"
    1613320263.241908 [0 172.22.0.1:64420] "scan" "22" "MATCH" "product:*"

    "{\"cookie\":{\"originalMaxAge\":10800000,\"expires\":\"2021-02-14T19:31:03.254Z\",\"httpOnly\":true,\"path\":\"/\"},\"cartId\":\"4bc231293c5345370f8fab83aff52cf3\"}" "EX" "10800"

    Shopping Cart

    Conclusion

Storing shopping cart data in Redis is a good idea because it lets you retrieve the data very fast at any time and persist it if needed. Compared to cookies, which store the entire shopping cart in the session and are bloated and relatively slow to operate on, storing the shopping cart data in Redis speeds up the cart’s read and write performance, thereby improving the user experience.

    Reference


    How to Build a Social Network Application using Redis Stack and NodeJS


    Profile picture for Julian Mateu
    Author:
    Julian Mateu, Sr. Backend Software Engineer at Globality, Inc.
    Profile picture for Manuel Aguirre
    Author:
    Manuel Aguirre, Backend Engineer at Baseline Spain

    image

    In this blog post we’ll build a social network application using Redis Stack and NodeJS. This is the idea that we used for our app Skillmarket.

The goal of the application is to match users with complementary skills. It will allow users to register and provide some information about themselves, like location, areas of expertise, and interests. Using search in Redis Stack, it will match two users who are geographically close and have complementary areas of expertise and interests, e.g., one of them knows French and wants to learn guitar while the other knows guitar and wants to learn French.

The full source code of our application can be found on GitHub (note that we used some features, like FT.ADD, which are now deprecated):

    We will be using a more condensed version of the backend which can be found in the Skillmarket Blogpost GitHub repo.

    Refer to the official tutorial for more information about search in Redis Stack.

    Getting Familiar with search in Redis Stack

Launching search in Redis Stack in a Docker container

    Let’s start by launching Redis from the Redis Stack image using Docker:

    docker run -d --name redis redis/redis-stack:latest

    Here we use the docker run command to start the container and pull the image if it is not present. The -d flag tells docker to launch the container in the background (detached mode). We provide a name with --name redis which will allow us to refer to this container with a friendly name instead of the hash or the random name docker will assign to it.

Finally, redis/redis-stack:latest tells Docker to use the latest version of the redis/redis-stack image.

Once the image starts, we can use docker exec to launch a terminal inside the container, using the -it flag (interactive tty), the redis name provided before when creating the container, and the bash command:

    docker exec -it redis bash

    Once inside the container, let’s launch a redis-cli instance to familiarize ourselves with the CLI:

    redis-cli

    You will notice the prompt now indicates we’re connected to 127.0.0.1:6379

    Creating Users

    We’ll use a Hash as the data structure to store information about our users. This will be a proof of concept, so our application will only use Redis as the data store. For a real life scenario, it would probably be better to have a primary data store which is the authoritative source of user data, and use Redis as the search index which can be used to speed up searches.

In a nutshell, you can think of a hash as a key/value store where the key can be any string we want, and the values are a document with several fields. It’s common practice to use hashes to store many different types of objects, prefixing keys with the object type, so a key takes the form "object_type:id".

An index will then be used on this hash data structure to efficiently search for values of given fields. The following diagram, taken from the search docs, exemplifies this with a database for movies:

    alt_text

Use the help @hash command (or refer to the documentation) to get a list of commands that can be used to manipulate hashes. To get help for a single command, like HSET, let’s type help HSET:

    127.0.0.1:6379> help hset

    HSET key field value [field value ...]
    summary: Set the string value of a hash field
    since: 2.0.0
    group: hash

As we see, we can provide a key and a list of field-value pairs.

We’ll create a user in the hash table by using users:<id> as the key, and we’ll provide the fields expertises, interests, and location:

    HSET users:1 name "Alice" expertises "piano, dancing" interests "spanish, bowling" location "2.2948552,48.8736537"

    HSET users:2 name "Bob" expertises "french, spanish" interests "piano" location "2.2945412,48.8583206"

    HSET users:3 name "Charles" expertises "spanish, bowling" interests "piano, dancing" location "-0.124772,51.5007169"

    Query to match users

Here we can see the power of the search index, which allows us to query by tags (we provide a list of values, such as interests, and it will return any user whose interests match at least one value in the list) and by geo (we can ask for users whose location is within a given radius in km of a point).

    To be able to do this, we have to instruct search to create an index:

    FT.CREATE idx:users ON hash PREFIX 1 "users:" SCHEMA interests TAG expertises TAG location GEO

    We use the FT.CREATE command to create a full text search index named idx:users. We specify ON hash to indicate that we’re indexing the hash table, and provide PREFIX 1 "users:" to indicate that we should index any document whose key starts with the prefix “users:”. Finally we indicate the SCHEMA of the index by providing a list of fields to index, and their type.

    Finally, we can query the index using the FT.SEARCH command (see the query syntax reference):

    127.0.0.1:6379> FT.SEARCH idx:users "@interests:{dancing|piano} @expertises:{spanish|bowling} @location:[2.2948552 48.8736537 5 km]"
    1) (integer) 1
    2) "users:2"
    3) 1) "name"
    2) "Bob"
    3) "expertises"
    4) "french, spanish"
    5) "interests"
    6) "piano"
    7) "location"
    8) "2.2945412,48.8583206"

    In this case we’re looking for matches for Alice, so we use her expertises in the interests field of the query, and her interests in the expertises field. We also search for users in a 5km radius from her location, and we get Bob as a match.

    If we expand the search radius to 500km we’ll also see that Charles is returned:

    127.0.0.1:6379> FT.SEARCH idx:users "@interests:{dancing|piano} @expertises:{spanish|bowling} @location:[2.2948552 48.8736537 500 km]"
    1) (integer) 2
    2) "users:3"
    3) 1) "name"
    2) "Charles"
    3) "expertises"
    4) "spanish, bowling"
    5) "interests"
    6) "piano, dancing"
    7) "location"
    8) "-0.124772,51.5007169"
    4) "users:2"
    5) 1) "name"
    2) "Bob"
    3) "expertises"
    4) "french, spanish"
    5) "interests"
    6) "piano"
    7) "location"
    8) "2.2945412,48.8583206"

    Cleaning Up

    We can now remove the docker instance and move on to building the web application, running the following command from outside the instance:

     docker rm -f redis

    Building a minimal backend in Typescript

    After understanding how the index works, let’s build a minimal backend API in NodeJS that will allow us to create a user, and query for matching users.

    note

    This is just an example, and we’re not providing proper validation or error handling, nor other features required for the backend (e.g. authentication).

    Redis client

    We’ll use the node-redis package to create a client:

const {
  REDIS_PORT = 6379,
  REDIS_HOST = 'localhost',
} = process.env;

const client: RediSearchClient = createClient({
  port: Number(REDIS_PORT),
  host: REDIS_HOST,
});

    All the functions in the library use callbacks, but we can use promisify to enable the async/await syntax:

    client.hgetallAsync = promisify(client.hgetall).bind(client);
    client.hsetAsync = promisify(client.hset).bind(client);
    client.ft_createAsync = promisify(client.ft_create).bind(client);
    client.ft_searchAsync = promisify(client.ft_search).bind(client);

    Finally, let’s define a function to create the user index, as we did before in the CLI example:

async function createUserIndex() {
  client.ft_createAsync(
    'idx:users',
    ['ON', 'hash', 'PREFIX', '1', 'users:', 'SCHEMA', 'interests', 'TAG', 'expertises', 'TAG', 'location', 'GEO']
  );
}

    User controller

Let’s define the functions that the controller will use to expose a simple API on top of Redis. We’ll define three functions:

• findUserById(userId)
• createUser(user)
• findMatchesForUser(user)

    But first let’s define the model we’ll use for the users:

interface Location {
  latitude: number;
  longitude: number;
}

interface User {
  id?: string;
  name: string;
  interests: string[];
  expertises: string[];
  location: Location;
}

    Let’s start with the function to create a user from the model object:

async function createUser(user: User): Promise<string> {
  const id = uuid();
  redisearchClient.hsetAsync(`users:${id}`, _userToSetRequestString(user));
  return id;
}

function _userToSetRequestString(user: User): string[] {
  const { id, location, interests, expertises, ...fields } = user;
  let result = Object.entries(fields).flat();
  result.push('interests', interests.join(', '));
  result.push('expertises', expertises.join(', '));
  result.push('location', `${location.longitude},${location.latitude}`);
  return result;
}

We will create a UUID for the user, and then transform the TAG and GEO fields into the Redis format. Here’s an example of what these two formats look like:

    my image
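
Illustratively, the flattened entries sent via HSET for a user like Alice look like this (the UUID is a placeholder):

HSET users:<uuid> name "Alice" interests "spanish, bowling" expertises "piano, dancing" location "2.2948552,48.8736537"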

    Let’s now look at the logic to retrieve an existing user from the Hash table using HGETALL:

async function findUserById(userId: string): Promise<User> {
  const response = await redisearchClient.hgetallAsync(`users:${userId}`);
  if (!response) {
    throw new Error('User Not Found');
  }
  return _userFromFlatEntriesArray(userId, Object.entries(response).flat());
}

function _userFromFlatEntriesArray(id: string, flatEntriesArray: any[]): User {
  let user: any = {};

  // The flat entries array contains all keys and values as elements in an array, e.g.:
  // [key1, value1, key2, value2]
  for (let j = 0; j < flatEntriesArray.length; j += 2) {
    let key: string = flatEntriesArray[j];
    let value: string = flatEntriesArray[j + 1];
    user[key] = value;
  }

  const location: string[] = user.location.split(',');
  user.location = { longitude: Number(location[0]), latitude: Number(location[1]) };
  user.expertises = user.expertises.split(', ');
  user.interests = user.interests.split(', ');

  return { id, ...user };
}

    Here we have the inverse logic, where we want to split the TAG and GEO fields into a model object. There’s also the fact that HGETALL returns the field names and values in an array, and we need to build the model object from that.

    Let’s finally take a look at the logic to find matches for a given user:

async function findMatchesForUser(user: User, radiusKm: number): Promise<User[]> {
  // Note: interests and expertises are passed swapped on purpose, so we search
  // for users whose expertises match this user's interests and vice versa
  const allMatches: User[] = await _findMatches(user.interests, user.expertises, user.location, radiusKm);
  return allMatches.filter(u => u.id !== user.id);
}

async function _findMatches(expertises: string[], interests: string[], location: Location, radiusKm: number): Promise<User[]> {
  let query = `@interests:{${interests.join('|')}}`;
  query += ` @expertises:{${expertises.join('|')}}`;
  query += ` @location:[${location.longitude} ${location.latitude} ${radiusKm} km]`;

  const response = await redisearchClient.ft_searchAsync('idx:users', query);

  return _usersFromSearchResponseArray(response);
}

function _usersFromSearchResponseArray(response: any[]): User[] {
  let users = [];

  // The search response is an array where the first element indicates the number of
  // results, and then the array contains all matches in order; one element is the key
  // and the next is the object, e.g.: [2, key1, object1, key2, object2]
  for (let i = 1; i <= 2 * response[0]; i += 2) {
    const user: User = _userFromFlatEntriesArray(response[i].replace('users:', ''), response[i + 1]);
    users.push(user);
  }

  return users;
}

Here we swap interests and expertises to find the complementary skill set, and we build the query that we used previously in the CLI example. We finally call the FT.SEARCH function and build the model object from the response, which comes as an array. Results are filtered to exclude the current user from the matches list.

    Web API

Finally, we can build a trivial web API using express, exposing a POST /users endpoint to create a user, a GET /users/:userId endpoint to retrieve a user, and a GET /users/:userId/matches endpoint to find matches for the given user (the desired radiusKm can optionally be specified as a query parameter):

app.post('/users', async (req, res) => {
  const user: User = req.body;

  if (!user || !user.name || !user.expertises || !user.interests || user.location.latitude === undefined || user.location.longitude === undefined) {
    res.status(400).send('Missing required fields');
  } else {
    const userId = await userController.createUser(user);
    res.status(200).send(userId);
  }
});

app.get("/users/:userId", async (req, res) => {
  try {
    const user: User = await userController.findUserById(req.params.userId);
    res.status(200).send(user);
  } catch (e) {
    res.status(404).send();
  }
});

app.get("/users/:userId/matches", async (req, res) => {
  try {
    const radiusKm: number = Number(req.query.radiusKm) || 500;
    const user: User = await userController.findUserById(req.params.userId);
    const matches: User[] = await userController.findMatchesForUser(user, radiusKm);
    res.status(200).send(matches);
  } catch (e) {
    console.log(e);
    res.status(404).send();
  }
});

    Full code example

The code used in this blog post can be found in the GitHub repo. The backend, together with Redis, can be launched using Docker Compose:

     docker compose up -d --build

The backend API will be exposed on port 8080. We can see the logs with docker compose logs and use a client to query it. Here’s an example using httpie:

    http :8080/users \
    name="Alice" \
    expertises:='["piano", "dancing"]' \
    interests:='["spanish", "bowling"]' \
    location:='{"longitude": 2.2948552, "latitude": 48.8736537}'

    ----------
    HTTP/1.1 200 OK
    Connection: keep-alive
    Content-Length: 36
    Content-Type: text/html; charset=utf-8
    Date: Mon, 01 Nov 2021 05:24:52 GMT
    ETag: W/"24-dMinMMphAGzfWiCs49RBYnyK+r8"
    Keep-Alive: timeout=5
    X-Powered-By: Express

    03aef405-ef37-4254-ab3c-a5ddfbc4f04e
    http ":8080/users/03aef405-ef37-4254-ab3c-a5ddfbc4f04e/matches?radiusKm=15"
    HTTP/1.1 200 OK
    Connection: keep-alive
    Content-Length: 174
    Content-Type: application/json; charset=utf-8
    Date: Mon, 01 Nov 2021 05:26:29 GMT
    ETag: W/"ae-3k2/swmuFaJd7BNHrkgvS/S+h2g"
    Keep-Alive: timeout=5
    X-Powered-By: Express
    [
    {
    "expertises": [
    "french",
    " spanish"
    ],
    "id": "58e81f09-d9fa-4557-9b8f-9f48a9cec328",
    "interests": [
    "piano"
    ],
    "location": {
    "latitude": 48.8583206,
    "longitude": 2.2945412
    },
    "name": "Bob"
    }
    ]

Finally, clean up the environment:

    docker compose down --volumes --remove-orphans

    References

    Redis Launchpad
    - + \ No newline at end of file diff --git a/howtos/solutions/caching-architecture/cache-prefetching/index.html b/howtos/solutions/caching-architecture/cache-prefetching/index.html index e6df8d2334..0f821cea71 100644 --- a/howtos/solutions/caching-architecture/cache-prefetching/index.html +++ b/howtos/solutions/caching-architecture/cache-prefetching/index.html @@ -4,7 +4,7 @@ How to use Redis for Cache Prefetching Strategy | The Home of Redis Developers - + @@ -13,7 +13,7 @@

    How to use Redis for Cache Prefetching Strategy


    Profile picture for Prasan Kumar
    Author:
    Prasan Kumar, Technical Solutions Developer at Redis
    Profile picture for Will Johnston
    Author:
    Will Johnston, Developer Growth Manager at Redis

    GITHUB CODE

Below are the commands to clone the source code (frontend and backend) for the application used in this tutorial:

    git clone https://github.com/redis-developer/ebook-speed-mern-frontend.git

    git clone https://github.com/redis-developer/ebook-speed-mern-backend.git

    What is cache prefetching?

    Cache prefetching is a technique used in database management systems (DBMS) to improve query performance by anticipating and fetching data from the storage subsystem before it is explicitly requested by a query.

    There are three main strategies for cache prefetching:

    1. Sequential prefetching: This approach anticipates that data will be accessed in a sequential manner, such as when scanning a table or index. It prefetches the next set of data blocks or pages in the sequence to ensure they are available in cache when needed.
    2. Prefetching based on query patterns: Some database systems can analyze past query patterns to predict which data is likely to be accessed in the future. By analyzing these patterns, the DBMS can prefetch relevant data and have it available in cache when a similar query is executed.
    3. Prefetching based on data access patterns: In some cases, data access patterns can be derived from the application logic or schema design. By understanding these patterns, the database system can prefetch data that is likely to be accessed soon.

    This tutorial will cover the third strategy, prefetching based on data access patterns.

    Imagine you're building a movie streaming platform. You need to be able to provide your users with a dashboard that allows them to quickly find the movies they want to watch. You have an extensive database filled with movies, and you have them categorized by things like country of origin, genre, language, etc. This data changes infrequently, and is regularly referenced all over your app and by other data. This kind of data that is long-lived and changes infrequently is called "master data."

    One ongoing developer challenge is to swiftly create, read, update, and delete master data. You might store your master data in a system of record like a SQL database or document database, and then use Redis as a cache to speed up lookups for that data. Then, when an application requests master data, instead of coming from the system of record, the master data is served from Redis. This is called the "master data-lookup" pattern.

    From a developer's point of view, "master data lookup" refers to the process by which master data is accessed in business transactions, in application setup, and any other way that software retrieves the information. Examples of master data lookup include fetching data for user interface (UI) elements (such as drop-down dialogs, select values, multi-language labels), fetching constants, user access control, theme, and other product configuration.

    Below you will find a diagram of the data flow for prefetching master data using Redis with MongoDB as the system of record.

    pattern

The steps involved in fetching data are as follows (a code sketch follows the list):

1. Read the master data from MongoDB on application startup and store a copy of the data in Redis. This pre-caches the data for fast retrieval. Use a script or a cron job to regularly copy the latest master data to Redis.
    2. The application requests master data.
    3. Instead of MongoDB serving the data, the master data will be served from Redis.
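
Conceptually, the flow can be sketched in a few lines of Python (the demo application itself uses Node.js and Redis OM; the collection, key names, and connection details below are hypothetical):

import redis
from pymongo import MongoClient

r = redis.Redis()
db = MongoClient('mongodb://localhost:27017')['dbSpeedMernDemo']

def prefetch_master_data():
    # Step 1: copy master data from MongoDB into Redis on startup
    for doc in db['masterCategories'].find({'statusCode': {'$gt': 0}}):
        doc['_id'] = str(doc['_id'])  # ObjectId is not JSON-serializable
        r.json().set(f"master:category:{doc['_id']}", '$', doc)  # needs the JSON capability

def get_categories():
    # Steps 2 and 3: serve master data from Redis instead of MongoDB
    return [r.json().get(key) for key in r.scan_iter('master:category:*')]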

    Why you should use Redis for cache prefetching

    1. Serve prefetched data at speed: By definition, nearly every application requires access to master or other common data. Pre-caching such frequent data with Redis delivers it to users at high speed.
    2. Support massive tables: Master tables often have millions of records. Searching through them can cause performance bottlenecks. Use Redis to perform real-time search on the large tables to increase performance with sub-millisecond response.
    3. Postpone expensive hardware and software investments: Defer costly infrastructure enhancements by using Redis. Get the performance and scaling benefits without asking the CFO to write a check.
    tip

    If you use Redis Enterprise, cache prefetching is easier due to its support for JSON and search. You also get additional features such as real-time performance, high scalability, resiliency, and fault tolerance. You can also call upon high-availability features such as Active-Active geo-redundancy.

    Cache prefetching in a NodeJS application with Redis and MongoDB

    Demo application

The demo application used in the rest of this tutorial showcases a movie application with basic create, read, update, and delete (CRUD) operations.

demo-01

    The movie application dashboard contains a search section at the top and a list of movie cards in the middle. The floating plus icon displays a pop-up when the user selects it, permitting the user to enter new movie details. The search section has a text search bar and a toggle link between text search and basic (that is, form-based) search. Each movie card has edit and delete icons, which are displayed when a mouse hovers over the card.

    GITHUB CODE

Below are the commands to clone the source code (frontend and backend) for the application used in this tutorial:

    git clone https://github.com/redis-developer/ebook-speed-mern-frontend.git

    git clone https://github.com/redis-developer/ebook-speed-mern-backend.git

Certain fields used in the demo application serve as master data, including movie language, country, genre, and ratings. They are master data because they are required for almost every application transaction. For example, consider the pop-up dialog (seen below) that appears when a user who wants to add a new movie clicks the movie application's plus icon. The pop-up includes drop-down menus for both country and language. In this case, Redis stores and provides the values.

    demo-03

    Prefetching data with Redis and MongoDB

    The code snippet below is used to prefetch MongoDB JSON documents and store them in Redis (as JSON) using the Redis OM for Node.js library.

async function insertMasterCategoriesToRedis() {
  ...
  const _dataArr = await getMasterCategories(); //from MongoDb
  const repository = MasterCategoryRepo.getRepository();

  if (repository && _dataArr && _dataArr.length) {
    for (const record of _dataArr) {
      const entity = repository.createEntity(record);
      entity.categoryTag = [entity.category]; //for tag search
      //adds JSON to Redis
      await repository.save(entity);
    }
  }
  ...
}

async function getMasterCategories() {
  //fetching data from MongoDb
  ...
  db.collection("masterCategories").find({
    statusCode: {
      $gt: 0,
    },
    category: {
      $in: ["COUNTRY", "LANGUAGE"],
    },
  });
  ...
}

    You can also check RedisInsight to verify that JSON data is inserted, as seen below:

    Redis-jsonRedis-json
    tip

RedisInsight is the free Redis GUI for viewing data in Redis. Click here to download.

    Querying prefetched data from Redis

Prior to prefetching with Redis, the application queried MongoDB directly to retrieve the movie's country and language values. As more people started using the application, the database became overloaded with queries, and the application grew slow and unresponsive. To solve this problem, the application was modified to store the master data in Redis. The code snippet below shows how the application queries Redis for the master data, specifically the country and language values for the drop-down menus:

// With Redis
// Redis OM Node query
function getMasterCategories() {
  ...
  masterCategoriesRepository
    .search()
    .where("statusCode")
    .gt(0)
    .and("categoryTag")
    .containOneOf("COUNTRY", "LANGUAGE");
  ...
}

    Ready to use Redis for cache prefetching?

    In this tutorial you learned how to use Redis for cache prefetching with a "master data lookup" example. While this is one way Redis is used in an application, it's possible to incrementally adopt Redis wherever needed with other caching strategies/patterns. For more resources on the topic of caching, check out the links below:

    Additional resources


    caching-movie-app

The demo application used in the rest of this tutorial showcases a movie application with basic create, read, update, and delete (CRUD) operations.

demo-01

    The movie application dashboard contains a search section at the top and a list of movie cards in the middle. The floating plus icon displays a pop-up when the user selects it, permitting the user to enter new movie details. The search section has a text search bar and a toggle link between text search and basic (that is, form-based) search. Each movie card has edit and delete icons, which are displayed when a mouse hovers over the card.


    redis-gears

    What is RedisGears?

    RedisGears is a programmable serverless engine for transaction, batch, and event-driven data processing allowing users to write and run their own functions on data stored in Redis.

    Functions can be implemented in different languages, including Python and C, and can be executed by the RedisGears engine in one of two ways:

    1. Batch: triggered by the Run action, execution is immediate and on existing data
    2. Event: triggered by the Register action, execution is triggered by new events and on their data

    Some batch type operations RedisGears can do:

• Run an operation on all keys in the KeySpace or keys matching a certain pattern, like:
      • Prefix all KeyNames with person:
      • Delete all keys whose value is smaller than zero
      • Write all the KeyNames starting with person: to a set
• Run a set of operations on all (or matched) keys where the output of one operation is the input of another, like:
      • Find all keys with a prefix person: (assume all of them are of type hash)
      • Increase user's days_old by 1, then sum them by age group (10-20, 20-30 etc.)
      • Add today's stats to the sorted set of every client, calculate last 7 days average and save the computed result in a string

    Some event type operations RedisGears can do:

• RedisGears can also register event listeners that trigger a function execution every time a watched key is changed (see the sketch after this list), like:
      • Listen for all operations on all keys and keep a list of all KeyNames in the KeySpace
      • Listen for DEL operations on keys with a prefix I-AM-IMPORTANT: and asynchronously dump them in a "deleted keys" log file
      • Listen for all HINCRBY operations on the element score of keys with a prefix player: and synchronously update a user's level when the score reaches 1000
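
As an illustration, a simplified version of the second pattern above could be sketched like this (assuming the RedisGears 1.2 Python API; this just logs the deletion rather than writing to a file):

# Sketch: react to every DEL on keys prefixed "I-AM-IMPORTANT:"
gb = GearsBuilder('KeysReader')
gb.foreach(lambda record: log('deleted key: ' + record['key']))
gb.register(prefix='I-AM-IMPORTANT:*', eventTypes=['del'])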

    How do I use RedisGears?

    Run the Docker container:

docker run -d --name redisgears -p 6379:6379 redislabs/redisgears:latest

For a very simple example that lists all keys in your Redis database with a prefix of person:, create the following Python script and name it hello_gears.py:

gb = GearsBuilder()
gb.run('person:*')

    Execute your function:

    docker exec -i redisgears redis-cli RG.PYEXECUTE "`cat hello_gears.py`"

    Using gears-cli

The gears-cli tool provides an easier way to execute RedisGears functions, especially if you need to pass some parameters too.

    It's written in Python and can be installed with pip:

    pip install gears-cli
    gears-cli hello_gears.py REQUIREMENTS rgsync

    Usage:

    gears-cli --help
    usage: gears-cli [-h] [--host HOST] [--port PORT]
    [--requirements REQUIREMENTS] [--password PASSWORD] path [extra_args [extra_args ...]]

    RedisGears references


    write-behind-vs-write-through

There are two related write patterns, and the main differences between them are as follows:

Write behind
• Syncs data asynchronously
• Data between the cache and the system of record (database) is inconsistent for a short time

Write through
• Syncs data synchronously/immediately
• Data between the cache and the system of record (database) is always consistent
    - + \ No newline at end of file diff --git a/howtos/solutions/caching-architecture/write-behind/index.html b/howtos/solutions/caching-architecture/write-behind/index.html index 8d54f25039..9399b7a626 100644 --- a/howtos/solutions/caching-architecture/write-behind/index.html +++ b/howtos/solutions/caching-architecture/write-behind/index.html @@ -4,7 +4,7 @@ How to use Redis for Write-behind Caching | The Home of Redis Developers - + @@ -15,7 +15,7 @@ demo-01

    The movie application dashboard contains a search section at the top and a list of movie cards in the middle. The floating plus icon displays a pop-up when the user selects it, permitting the user to enter new movie details. The search section has a text search bar and a toggle link between text search and basic (that is, form-based) search. Each movie card has edit and delete icons, which are displayed when a mouse hovers over the card.

    GITHUB CODE

Below are the commands to clone the source code (frontend and backend) for the application used in this tutorial:

    git clone https://github.com/redis-developer/ebook-speed-mern-frontend.git

    git clone https://github.com/redis-developer/ebook-speed-mern-backend.git

    To demonstrate this pattern using the movie application, imagine that the user opens the pop-up to add a new movie.

    demo-02

    Instead of the application immediately storing the data in MongoDB, the application writes the changes to Redis. In the background, RedisGears automatically synchronizes the data with the MongoDB database.

    Programming Redis using the write-behind pattern

Developers need to load some code (Python, in our example) to the Redis server before using the write-behind pattern (which syncs data from Redis to MongoDB). The Redis server has a RedisGears module that interprets the Python code and syncs the data from Redis to MongoDB.

    Loading the Python code is easier than it sounds. Simply replace database details in the Python file and then load the file to the Redis server.

    Create the Python file (shown below, and available online). Then update the MongoDB connection details, database, collection, and primary key name to sync.

    movies-write-behind.py
# Gears Recipe for a single write behind

# import redis gears & mongo db libs
from rgsync import RGJSONWriteBehind, RGJSONWriteThrough
from rgsync.Connectors import MongoConnector, MongoConnection

# change mongodb connection (admin)
# mongodb://usrAdmin:passwordAdmin@10.10.20.2:27017/dbSpeedMernDemo?authSource=admin
mongoUrl = 'mongodb://usrAdmin:passwordAdmin@10.10.20.2:27017/admin'

# MongoConnection(user, password, host, authSource?, fullConnectionUrl?)
connection = MongoConnection('', '', '', '', mongoUrl)

# change MongoDB database
db = 'dbSpeedMernDemo'

# change MongoDB collection & its primary key
movieConnector = MongoConnector(connection, db, 'movies', 'movieId')

# change redis keys with prefix that must be synced with the mongodb collection
RGJSONWriteBehind(GB, keysPrefix='MovieEntity',
                  connector=movieConnector, name='MoviesWriteBehind',
                  version='99.99.99')
What is a RedisGears recipe?

A collection of RedisGears functions, and any dependencies they may have, that implement a high-level functional purpose is called a recipe. Example: the RGJSONWriteBehind function in the Python code above.

    There are two ways to load that Python file into the Redis server:

    1. Using the gears command-line interface (CLI)

    Find more information about the Gears CLI at gears-cli and rgsync.

    # install
    pip install gears-cli
# If the Python file is located at "/users/tom/movies-write-behind.py"
gears-cli --host <redisHost> --port <redisPort> --password <redisPassword> run /users/tom/movies-write-behind.py REQUIREMENTS rgsync pymongo==3.12.0
2. Using RG.PYEXECUTE from the Redis command line.

    Find more information at RG.PYEXECUTE.

    # Via redis cli
    RG.PYEXECUTE 'pythonCode' REQUIREMENTS rgsync pymongo==3.12.0

The RG.PYEXECUTE command can also be executed from Node.js code (consult the sample Node file for more details).
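As a rough illustration, here is a minimal sketch of what that might look like with the node-redis client. The connection URL and the recipe path are assumptions for this sketch, not the demo's actual code; it simply reads the recipe shown above and passes it to RG.PYEXECUTE.

import { readFile } from 'fs/promises';
import { createClient } from 'redis';

async function loadWriteBehindRecipe() {
  // Connection URL and recipe path are assumptions for this sketch
  const client = createClient({ url: 'redis://localhost:6379' });
  await client.connect();

  const pythonCode = await readFile('./movies-write-behind.py', 'utf8');

  // RG.PYEXECUTE loads the recipe; REQUIREMENTS installs its Python dependencies
  const result = await client.sendCommand([
    'RG.PYEXECUTE',
    pythonCode,
    'REQUIREMENTS',
    'rgsync',
    'pymongo==3.12.0',
  ]);
  console.log(result); // 'OK' on success

  await client.disconnect();
}

loadWriteBehindRecipe().catch(console.error);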

    Find more examples at Redis Gears sync with MongoDB.

    Verifying the write-behind pattern using RedisInsight

    tip

RedisInsight is the free Redis GUI for viewing data in Redis. Click here to download.

    The next step is to verify that RedisGears is syncing data between Redis and MongoDB.

Insert a key starting with the prefix specified in the Python file, using the Redis CLI:

    redis-insight
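If you would rather insert the key from code than from the CLI, a minimal node-redis sketch might look like the following. The key suffix and JSON fields are illustrative assumptions; only the MovieEntity prefix and the movieId primary key come from the recipe above.

import { createClient } from 'redis';

async function insertMovie() {
  const client = createClient({ url: 'redis://localhost:6379' });
  await client.connect();

  // The 'MovieEntity' prefix matches keysPrefix in movies-write-behind.py,
  // so RedisGears will pick this key up and sync it to MongoDB in the background.
  await client.json.set('MovieEntity:1001', '$', {
    movieId: 1001, // primary key configured in the MongoConnector
    title: 'The Sample Movie', // illustrative fields only
    rating: 8.2,
  });

  await client.disconnect();
}

insertMovie().catch(console.error);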

    Next, confirm that the JSON is inserted in MongoDB too.

    mongo-compass

    You can also check RedisInsight to verify that the data is piped in via Streams for its consumers (like RedisGears).

    redis-insight-stream

How does all that work with the demo application? Below is a code snippet to insert a movie. Once data is written to Redis, RedisGears automatically synchronizes it to MongoDB.

BEFORE (using MongoDB)
...
//(Node mongo query)
if (movie) {
  //insert movie to MongoDB
  await db.collection('movies').insertOne(movie);
}
...
AFTER (using Redis)
...
//(Redis OM Node query)
if (movie) {
  const entity = moviesRepository.createEntity(movie);
  //insert movie to Redis
  await moviesRepository.save(entity);
}
...

    Ready to use Redis for write-behind caching?

    You now know how to use Redis for write-behind caching. It's possible to incrementally adopt Redis wherever needed with different strategies/patterns. For more resources on the topic of caching, check out the links below:

    Additional resources

    - + \ No newline at end of file diff --git a/howtos/solutions/caching-architecture/write-through/index.html b/howtos/solutions/caching-architecture/write-through/index.html index 2a1a3c80e9..51283d8305 100644 --- a/howtos/solutions/caching-architecture/write-through/index.html +++ b/howtos/solutions/caching-architecture/write-through/index.html @@ -4,7 +4,7 @@ How to use Redis for Write through caching strategy | The Home of Redis Developers - + @@ -15,7 +15,7 @@ So you need a way of quickly providing strong consistency of user data. In such situation, What you need is called the "write-through pattern."

With the write-through pattern, every time an application writes data to the cache, it also updates the records in the database. Unlike write behind, in this pattern the thread waits until the write to the database is also completed.

    Below is a diagram of the write-through pattern for the application:

    write-through-pattern using Redis in a movie streaming application

    The pattern works as follows:

    1. The application reads and writes data to Redis.
2. Redis syncs any changed data to the PostgreSQL database synchronously/immediately.

Note: the Redis server is blocked until a response from the main database is received.

There are two related write patterns, and the main differences between them are as follows:

Write behind
• Syncs data asynchronously
• Data between the cache and the system of record (database) is inconsistent for a short time

Write through
• Syncs data synchronously/immediately
• Data between the cache and the system of record (database) is always consistent

Learn more about the write-behind pattern

    Why you should use Redis for write-through caching

Write-through caching with Redis ensures that the cache of critical data is always up to date with the database, providing strong consistency and improving application performance.

Consider the scenarios below from different applications:

    • E-commerce application: In an e-commerce application, write-through caching can be used to ensure consistency of product inventory. Whenever a customer purchases a product, the inventory count should be updated immediately to avoid overselling. Redis can be used to cache the inventory count, and every update to the count can be written through to the database. This ensures that the inventory count in the cache is always up-to-date, and customers are not able to purchase items that are out of stock.

    • Banking application: In a banking application, write-through caching can be used to ensure consistency of account balances. Whenever a transaction is made, the account balance should be updated immediately to avoid overdrafts or other issues. Redis can be used to cache the account balances, and every transaction can be written through to the database. This ensures that the balance in the cache is always up-to-date, and transactions can be processed with strong consistency.

    • Online gaming platform: Suppose you have an online gaming platform where users can play games against each other. With write-through caching, any changes made to a user's score or game state would be saved to the database and also cached in Redis. This ensures that any subsequent reads for that user's score or game state would hit the cache first. This helps to reduce the load on the database and ensures that the game state displayed to users is always up-to-date.

    • Claims Processing System: In an insurance claims processing system, claims data needs to be consistent and up-to-date across different systems and applications. With write-through caching in Redis, new claims data can be written to both the database and Redis cache. This ensures that different applications always have the most up-to-date information about the claims, making it easier for claims adjusters to access the information they need to process claims more quickly and efficiently.

    • Healthcare Applications: In healthcare applications, patient data needs to be consistent and up-to-date across different systems and applications. With write-through caching in Redis, updated patient data can be written to both the database and Redis cache, ensuring that different applications always have the latest patient information. This can help improve patient care by providing accurate and timely information to healthcare providers.

    • Social media application: In a social media application, write-through caching can be used to ensure consistency of user profiles. Whenever a user updates their profile, the changes should be reflected immediately to avoid showing outdated information to other users. Redis can be used to cache the user profiles, and every update can be written through to the database. This ensures that the profile information in the cache is always up-to-date, and users can see accurate information about each other.

    Redis programmability for write-through caching using RedisGears

    tip

You can skip reading this section if you are already familiar with RedisGears.

    What is RedisGears?

RedisGears is a programmable serverless engine for transaction, batch, and event-driven data processing, allowing users to write and run their own functions on data stored in Redis.

    Functions can be implemented in different languages, including Python and C, and can be executed by the RedisGears engine in one of two ways:

    1. Batch: triggered by the Run action, execution is immediate and on existing data
    2. Event: triggered by the Register action, execution is triggered by new events and on their data

Some batch-type operations RedisGears can do:

• Run an operation on all keys in the KeySpace, or on keys matching a certain pattern, such as:
  • Prefix all KeyNames with person:
  • Delete all keys whose value is smaller than zero
  • Write all the KeyNames starting with person: to a set
• Run a set of operations on all (or matched) keys where the output of one operation is the input of another, such as:
  • Find all keys with the prefix person: (assume all of them are of type hash)
  • Increase each user's days_old by 1, then sum them by age group (10-20, 20-30, etc.)
  • Add today's stats to the sorted set of every client, calculate the last 7 days' average, and save the computed result in a string

Some event-type operations RedisGears can do:

• RedisGears can also register event listeners that trigger a function execution every time a watched key is changed, such as:
  • Listen for all operations on all keys and keep a list of all KeyNames in the KeySpace
  • Listen for DEL operations on keys with the prefix I-AM-IMPORTANT: and asynchronously dump them in a "deleted keys" log file
  • Listen for all HINCRBY operations on the score field of keys with the prefix player: and synchronously update a user's level when the score reaches 1000

    How do I use RedisGears?

    Run the Docker container:

docker run --name redisgears -p 6379:6379 redislabs/redisgears:latest

For a very simple example that lists all keys in your Redis database with a prefix of person:, create the following Python script and name it hello_gears.py:

gb = GearsBuilder()
gb.run('person:*')

    Execute your function:

    docker exec -i redisgears redis-cli RG.PYEXECUTE "`cat hello_gears.py`"

    Using gears-cli

The gears-cli tool provides an easier way to execute RedisGears functions, especially if you need to pass some parameters too.

    It's written in Python and can be installed with pip:

    pip install gears-cli
    gears-cli hello_gears.py REQUIREMENTS rgsync

    Usage:

gears-cli --help
usage: gears-cli [-h] [--host HOST] [--port PORT]
                 [--requirements REQUIREMENTS] [--password PASSWORD]
                 path [extra_args [extra_args ...]]

    RedisGears references

    Programming Redis using the write-through pattern

For our sample code, we will demonstrate writing users to Redis and then writing through to PostgreSQL. Use the docker-compose.yml file below to set up the required environment:

    docker-compose.yml
version: '3.9'
services:
  redis:
    container_name: redis
    image: 'redislabs/redismod:latest'
    ports:
      - 6379:6379
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
  postgres:
    image: postgres
    restart: always
    environment:
      POSTGRES_USER: root
      POSTGRES_PASSWORD: password
      POSTGRES_DB: example
  adminer:
    image: adminer
    restart: always
    ports:
      - 8080:8080

    To run the docker-compose file, run the following command:

    $ docker compose up -d

    This will create a Redis server, a PostgreSQL server, and an Adminer server. Adminer is a web-based database management tool that allows you to view and edit data in your database.

    Next, open your browser to http://localhost:8080/?pgsql=postgres&username=root&db=example&ns=public&sql=. You will have to input the password (which is password in the example above),

    adminer-login

    then you will be taken to a SQL command page. Run the following SQL command to create a table:

    users.sql
CREATE TABLE users (
  id SERIAL PRIMARY KEY,
  username VARCHAR(255) UNIQUE NOT NULL,
  email VARCHAR(255) UNIQUE NOT NULL,
  password_hash VARCHAR(255) NOT NULL,
  first_name VARCHAR(255),
  last_name VARCHAR(255),
  date_of_birth DATE,
  created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
  updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);

    adminer-table-creation

Developers need to load some code (Python, in our example) to the Redis server before using the write-through pattern (which syncs data from Redis to the system of record). The Redis server has a RedisGears module that interprets the Python code and syncs the data from Redis to the system of record.

Now we need to create a RedisGears recipe that writes through to the PostgreSQL database. The following Python code does exactly that:

    write-through.py
from rgsync import RGWriteThrough
from rgsync.Connectors import PostgresConnector, PostgresConnection

'''
Create Postgres connection object
'''
connection = PostgresConnection('root', 'password', 'postgres:5432/example')

'''
Create Postgres users connector
'''
usersConnector = PostgresConnector(connection, 'users', 'id')

usersMappings = {
    'username': 'username',
    'email': 'email',
    'pwhash': 'password_hash',
    'first': 'first_name',
    'last': 'last_name',
    'dob': 'date_of_birth',
    'created_at': 'created_at',
    'updated_at': 'updated_at',
}

RGWriteThrough(GB, keysPrefix='__', mappings=usersMappings,
               connector=usersConnector, name='UsersWriteThrough',
               version='99.99.99')

Make sure you create the file "write-through.py" because the next instructions will use it. For the purpose of this example, we are showing how to map Redis hash fields to PostgreSQL table columns. The RGWriteThrough function takes in the usersMappings, where the keys are the Redis hash fields and the values are the PostgreSQL table columns.
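To make the mapping concrete, here is a minimal node-redis sketch of writing a staging hash using the Redis-side field names from usersMappings. It mirrors the redis-cli command shown later in this tutorial; the values are illustrative.

import { createClient } from 'redis';

async function writeThroughUser() {
  const client = createClient({ url: 'redis://localhost:6379' });
  await client.connect();

  // Field names are the keys of usersMappings; RedisGears maps them to the
  // PostgreSQL columns (e.g. pwhash -> password_hash) and syncs the row.
  await client.hSet('__{users:1}', {
    username: 'john',
    email: 'john@gmail.com',
    pwhash: '<sha256-of-password>', // illustrative placeholder
    first: 'John',
    last: 'Doe',
    dob: '1990-01-01',
    created_at: '2023-04-20',
    updated_at: '2023-04-20',
  });

  await client.disconnect();
}

writeThroughUser().catch(console.error);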

    What is a RedisGears recipe?

A collection of RedisGears functions, and any dependencies they may have, that implement a high-level functional purpose is called a recipe. Example: the RGWriteThrough function in the Python code above.

The Python file has a few dependencies in order to work. Below is the requirements.txt file that contains the dependencies; create it alongside the "write-through.py" file:

    requirements.txt
    rgsync
    psycopg2-binary
    cryptography

    There are two ways (gears CLI and RG.PYEXECUTE) to load that Python file into the Redis server:

    1. Using the gears command-line interface (CLI)

    Find more information about the Gears CLI at gears-cli and rgsync.

    # install
    pip install gears-cli

    To run our write-through recipe using gears-cli, we need to run the following command:

    $ gears-cli run --host localhost --port 6379 write-through.py --requirements requirements.txt

    You should get a response that says "OK". That is how you know you have successfully loaded the Python file into the Redis server.

    tip

    If you are on Windows, we recommend you use WSL to install and use gears-cli.

2. Using RG.PYEXECUTE from the Redis command line.
    # Via redis cli
    RG.PYEXECUTE 'pythonCode' REQUIREMENTS rgsync psycopg2-binary cryptography
    tip

The RG.PYEXECUTE command can also be executed from Node.js code (consult the sample Node file for more details).

    tip

    Find more examples in the Redis Gears GitHub repository.

    Verifying the write-through pattern using RedisInsight

    tip

RedisInsight is the free Redis GUI for viewing data in Redis. Click here to download.

    The next step is to verify that RedisGears is syncing data between Redis and PostgreSQL. Note that in our Python file we specified a prefix for the keys. In this case, we specified __ as the prefix, users as the table, and id as the unique identifier. This instructs RedisGears to look for the following key format: __{users:<id>}. Try running the following command in the Redis command line:

    hset __{users:1} username john email john@gmail.com pwhash d1e8a70b5ccab1dc2f56bbf7e99f064a660c08e361a35751b9c483c88943d082 first John last Doe dob 1990-01-01 created_at 2023-04-20 updated_at 2023-04-20

    redis-hash-insert

    Check RedisInsight to verify that the hash value made it into Redis. After RedisGears is done processing the __{users:1} key, it will be deleted from Redis and replaced by the users:1 key. Check RedisInsight to verify that the users:1 key is in Redis.

    redis-hash-view

    Next, confirm that the user is inserted in PostgreSQL too by opening up the select page in Adminer. You should see the user inserted in the table.

    adminer-hash-view

    This is how you can use RedisGears to write through to PostgreSQL, and so far we have only added a hash key. You can also update specific hash fields and it will be reflected in your PostgreSQL database. Run the following command to update the username field:

    > hset __{users:1} username bar

    redis-hash-update

In RedisInsight, verify that the username field is updated.

    redis-hash-updated-view

    Now go into Adminer and check the username field. You should see that it has been updated to bar.

    adminer-updated-hash-view

    Ready to use Redis for write-through caching?

    You now know how to use Redis for write-through caching. It's possible to incrementally adopt Redis wherever needed with different strategies/patterns. For more resources on the topic of caching, check out the links below:

    Additional resources

    - + \ No newline at end of file diff --git a/howtos/solutions/fraud-detection/common-fraud/source-code-tip/index.html b/howtos/solutions/fraud-detection/common-fraud/source-code-tip/index.html index 43b829a9e1..40cf560d20 100644 --- a/howtos/solutions/fraud-detection/common-fraud/source-code-tip/index.html +++ b/howtos/solutions/fraud-detection/common-fraud/source-code-tip/index.html @@ -4,7 +4,7 @@ source-code-tip | The Home of Redis Developers - + @@ -12,7 +12,7 @@
    - + \ No newline at end of file diff --git a/howtos/solutions/fraud-detection/digital-identity-validation/index.html b/howtos/solutions/fraud-detection/digital-identity-validation/index.html index 04f6c24185..7b987090c6 100644 --- a/howtos/solutions/fraud-detection/digital-identity-validation/index.html +++ b/howtos/solutions/fraud-detection/digital-identity-validation/index.html @@ -4,7 +4,7 @@ How to Handle Digital Identity Validation Using Redis | The Home of Redis Developers - + @@ -18,7 +18,7 @@ Validation Identity as JSON
    Caveat

Even though you may receive a score of "1", this only means the identity matched 100% against the measured properties. We are only measuring digital aspects of the identity, which can be compromised. In a real-world scenario you would want to measure more characteristics, such as location, device type, and session, in addition to other contextual information, for a complete transaction risk score.

    E-commerce application frontend using Next.js and Tailwind

The e-commerce microservices application consists of a frontend, built using Next.js with TailwindCSS. The application backend uses Node.js. The data is stored in Redis and MongoDB/PostgreSQL using Prisma. Below you will find screenshots of the frontend of the e-commerce app:

    • Dashboard: Shows the list of products with search functionality

      redis microservices e-commerce app frontend products page

    • Shopping Cart: Add products to the cart, then check out using the "Buy Now" button redis microservices e-commerce app frontend shopping cart

    • Order history: Once an order is placed, the Orders link in the top navigation bar shows the order status and history

      redis microservices e-commerce app frontend order history page

    GITHUB CODE

Below is a command to clone the source code for the application used in this tutorial:

    git clone --branch v3.0.0 https://github.com/redis-developer/redis-microservices-ecommerce-solutions

Building a digital identity validation microservice with Redis

Now, let's go step-by-step through the process of storing, scoring, and validating digital identities using Redis, with some example code. For demo purposes, we are only using a few characteristics of a user's digital identity, such as IP address, browser fingerprint, and session. In a real-world application you should store more characteristics, such as location, device type, and prior actions taken, for better risk assessment and identity completeness.

Storing digital identities in Redis in a microservices architecture

1. login service: stores the (user) digital identity as an INSERT_LOGIN_IDENTITY stream entry in Redis
//addLoginToTransactionStream
const userId = 'USR_4e7acc44-e91e-4c5c-9112-bdd99d799dd3'; //from session
const sessionId = 'SES_94ff24a8-65b5-4795-9227-99906a43884e'; //from session
const persona = 'GRANDFATHER'; //from session

const entry: ITransactionStreamMessage = {
  action: TransactionStreamActions.INSERT_LOGIN_IDENTITY,
  logMessage: `[${REDIS_STREAMS.CONSUMERS.IDENTITY}] Digital identity to be stored for the user ${userId}`,
  userId,
  persona,
  sessionId,

  identityBrowserAgent: req.headers['user-agent'],
  identityIpAddress:
    req.headers['x-forwarded-for']?.toString() || req.socket.remoteAddress,
  transactionPipeline: JSON.stringify(TransactionPipelines.LOGIN),
};

const nodeRedisClient = getNodeRedisClient();
const streamKeyName = 'TRANSACTION_STREAM';
const id = '*'; //* = auto generate
await nodeRedisClient.xAdd(streamKeyName, id, entry);
2. digital identity service: reads the identity from the INSERT_LOGIN_IDENTITY stream entry
interface ListenStreamOptions {
  streams: {
    streamKeyName: string;
    eventHandlers: {
      [messageAction: string]: IMessageHandler;
    };
  }[];
  groupName: string;
  consumerName: string;
  maxNoOfEntriesToReadAtTime?: number;
}

// Below is some code for how you would use redis to listen for the stream events:

const listenToStreams = async (options: ListenStreamOptions) => {
  /*
    (A) create consumer group for the stream
    (B) read set of messages from the stream
    (C) process all messages received
    (D) trigger appropriate action callback for each message
    (E) acknowledge individual messages after processing
  */
  const nodeRedisClient = getNodeRedisClient();
  if (nodeRedisClient) {
    const streams = options.streams;
    const groupName = options.groupName;
    const consumerName = options.consumerName;
    const readMaxCount = options.maxNoOfEntriesToReadAtTime || 100;
    const idInitialPosition = '0'; //0 = start, $ = end or any specific id
    const streamKeyIdArr: {
      key: string;
      id: string;
    }[] = [];

    streams.map(async (stream) => {
      LoggerCls.info(
        `Creating consumer group ${groupName} in stream ${stream.streamKeyName}`,
      );

      try {
        // (A) create consumer group for the stream
        await nodeRedisClient.xGroupCreate(
          stream.streamKeyName,
          groupName,
          idInitialPosition,
          {
            MKSTREAM: true,
          },
        );
      } catch (err) {
        LoggerCls.error(
          `Consumer group ${groupName} already exists in stream ${stream.streamKeyName}!`,
        );
      }

      streamKeyIdArr.push({
        key: stream.streamKeyName,
        id: '>', // Next entry ID that no consumer in this group has read
      });
    });

    LoggerCls.info(`Starting consumer ${consumerName}.`);

    while (true) {
      try {
        // (B) read set of messages from different streams
        const dataArr = await nodeRedisClient.xReadGroup(
          commandOptions({
            isolated: true,
          }),
          groupName,
          consumerName,
          //can specify multiple streams in array [{key, id}]
          streamKeyIdArr,
          {
            COUNT: readMaxCount, // Read n entries at a time
            BLOCK: 5, // block for 5 ms if there are no new entries
          },
        );

        // dataArr = [
        //   {
        //     name: 'streamName',
        //     messages: [
        //       {
        //         id: '1642088708425-0',
        //         message: {
        //           key1: 'value1',
        //         },
        //       },
        //     ],
        //   },
        // ];

        //(C) process all messages received
        if (dataArr && dataArr.length) {
          for (let data of dataArr) {
            for (let messageItem of data.messages) {
              const streamKeyName = data.name;

              const stream = streams.find(
                (s) => s.streamKeyName == streamKeyName,
              );

              if (stream && messageItem.message) {
                const streamEventHandlers = stream.eventHandlers;
                const messageAction = messageItem.message.action;
                const messageHandler = streamEventHandlers[messageAction];

                if (messageHandler) {
                  // (D) trigger appropriate action callback for each message
                  await messageHandler(messageItem.message, messageItem.id);
                }
                //(E) acknowledge individual messages after processing
                nodeRedisClient.xAck(streamKeyName, groupName, messageItem.id);
              }
            }
          }
        } else {
          // LoggerCls.info('No new stream entries.');
        }
      } catch (err) {
        LoggerCls.error('xReadGroup error !', err);
      }
    }
  }
};

// `listenToStreams` listens for events and calls the appropriate callback to further handle the events.
listenToStreams({
  streams: [
    {
      streamKeyName: REDIS_STREAMS.STREAMS.TRANSACTIONS,
      eventHandlers: {
        [TransactionStreamActions.INSERT_LOGIN_IDENTITY]: insertLoginIdentity,
        //...
      },
    },
  ],
  groupName: REDIS_STREAMS.GROUPS.IDENTITY,
  consumerName: REDIS_STREAMS.CONSUMERS.IDENTITY,
});
3. digital identity service: stores the identity as JSON in Redis
const insertLoginIdentity: IMessageHandler = async (
  message: ITransactionStreamMessage,
  messageId,
) => {
  LoggerCls.info(`Adding digital identity to redis for ${message.userId}`);

  // add login digital identity to redis
  const insertedKey = await addDigitalIdentityToRedis(message);

  //...
};

const addDigitalIdentityToRedis = async (
  message: ITransactionStreamMessage,
) => {
  let insertedKey = '';

  const userId = message.userId;
  const digitalIdentity: IDigitalIdentity = {
    action: message.action,
    userId: userId,
    sessionId: message.sessionId,

    ipAddress: message.identityIpAddress,
    browserFingerprint: crypto
      .createHash('sha256')
      .update(message.identityBrowserAgent)
      .digest('hex'),
    identityScore: message.identityScore ? message.identityScore : '',

    createdOn: new Date(),
    createdBy: userId,
    statusCode: DB_ROW_STATUS.ACTIVE,
  };

  const repository = digitalIdentityRepo.getRepository();
  if (repository) {
    const entity = repository.createEntity(digitalIdentity);
    insertedKey = await repository.save(entity);
  }

  return insertedKey;
};
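The digitalIdentityRepo used above is not shown in this excerpt. As a hypothetical sketch (redis-om v0.3-style API for Node.js, with field names taken from IDigitalIdentity above), it might be set up roughly like this:

import { Client, Entity, Schema } from 'redis-om';

// Hypothetical shape of the repository used by addDigitalIdentityToRedis
class DigitalIdentity extends Entity {}

const digitalIdentitySchema = new Schema(DigitalIdentity, {
  action: { type: 'string' },
  userId: { type: 'string' },
  sessionId: { type: 'string' },
  ipAddress: { type: 'string' },
  browserFingerprint: { type: 'string' },
  identityScore: { type: 'string' },
  createdOn: { type: 'date' },
  createdBy: { type: 'string' },
  statusCode: { type: 'number' },
});

async function getRepository() {
  const client = await new Client().open('redis://localhost:6379');
  const repository = client.fetchRepository(digitalIdentitySchema);
  await repository.createIndex(); // required once so .search() queries work
  return repository;
}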

Validating digital identities using Redis in a microservices architecture

1. orders service: adds the digital identity to be validated to the Redis stream as a CALCULATE_IDENTITY_SCORE event
//adding Identity To TransactionStream
const userId = 'USR_4e7acc44-e91e-4c5c-9112-bdd99d799dd3';
const sessionId = 'SES_94ff24a8-65b5-4795-9227-99906a43884e';
let orderDetails = {
  orderId: '63f5f8dc3696d145a45775a6',
  orderAmount: '1000',
  userId: userId,
  sessionId: sessionId,
  orderStatus: 1,
  products: order.products, //array of product details
};

const entry: ITransactionStreamMessage = {
  action: 'CALCULATE_IDENTITY_SCORE',
  logMessage: `Digital identity to be validated/ scored for the user ${userId}`,
  userId: userId,
  sessionId: sessionId,
  orderDetails: orderDetails ? JSON.stringify(orderDetails) : '',
  transactionPipeline: JSON.stringify(TransactionPipelines.CHECKOUT),

  identityBrowserAgent: req.headers['user-agent'],
  identityIpAddress:
    req.headers['x-forwarded-for']?.toString() || req.socket.remoteAddress,
};

const nodeRedisClient = getNodeRedisClient();
const streamKeyName = 'TRANSACTION_STREAM';
const id = '*'; //* = auto generate
await nodeRedisClient.xAdd(streamKeyName, id, entry);
2. digital identity service: reads the identity from the CALCULATE_IDENTITY_SCORE event
listenToStreams({
  streams: [
    {
      streamKeyName: REDIS_STREAMS.STREAMS.TRANSACTIONS,
      eventHandlers: {
        // ...
        [TransactionStreamActions.CALCULATE_IDENTITY_SCORE]:
          scoreDigitalIdentity,
      },
    },
  ],
  groupName: REDIS_STREAMS.GROUPS.IDENTITY,
  consumerName: REDIS_STREAMS.CONSUMERS.IDENTITY,
});

const scoreDigitalIdentity: IMessageHandler = async (
  message: ITransactionStreamMessage,
  messageId,
) => {
  LoggerCls.info(`Scoring digital identity for ${message.userId}`);

  //step 1 - calculate score for validation digital identity
  const identityScore = await calculateIdentityScore(message);
  message.identityScore = identityScore.toString();

  LoggerCls.info(`Adding digital identity to redis for ${message.userId}`);
  //step 2 - add validation digital identity to redis
  const insertedKey = await addDigitalIdentityToRedis(message);

  // ...
};

const calculateIdentityScore = async (message: ITransactionStreamMessage) => {
  // Compare the "digital identity" with previously stored "login identities" and determine the identity score

  let identityScore = 0;
  const repository = digitalIdentityRepo.getRepository();

  if (message && message.userId && repository) {
    let queryBuilder = repository
      .search()
      .where('userId')
      .eq(message.userId)
      .and('action')
      .eq('INSERT_LOGIN_IDENTITY')
      .and('statusCode')
      .eq(DB_ROW_STATUS.ACTIVE);

    //console.log(queryBuilder.query);
    const digitalIdentities = await queryBuilder.return.all();

    if (digitalIdentities && digitalIdentities.length) {
      //if browser details match -> +1 score
      const matchBrowserItems = digitalIdentities.filter((_digIdent) => {
        let identityBrowserAgentHash = crypto
          .createHash('sha256')
          .update(message.identityBrowserAgent)
          .digest('hex');
        return _digIdent.browserFingerprint == identityBrowserAgentHash;
      });
      if (matchBrowserItems.length > 0) {
        identityScore += 1;
      }

      //if IP address matches -> +1 score
      const matchIpAddressItems = digitalIdentities.filter((_digIdent) => {
        return _digIdent.ipAddress == message.identityIpAddress;
      });
      if (matchIpAddressItems.length > 0) {
        identityScore += 1;
      }
    }
  }

  //calculate average score
  const noOfIdentityCharacteristics = 2; //2 == browserFingerprint, ipAddress
  identityScore = identityScore / noOfIdentityCharacteristics;
  return identityScore; // identityScore final value ranges between 0 (no match) and 1 (full match)
};
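For example, if the incoming browser fingerprint matches a stored login identity (+1) but the IP address does not (+0), the raw score is 1 and the final identityScore is 1 / 2 = 0.5.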
3. digital identity service: stores the identity with its score as JSON in Redis
const addDigitalIdentityToRedis = async (
  message: ITransactionStreamMessage,
) => {
  let insertedKey = '';

  const userId = message.userId;
  const digitalIdentity: IDigitalIdentity = {
    action: message.action,
    userId: userId,
    sessionId: message.sessionId,

    ipAddress: message.identityIpAddress,
    browserFingerprint: crypto
      .createHash('sha256')
      .update(message.identityBrowserAgent)
      .digest('hex'),
    identityScore: message.identityScore ? message.identityScore : '',

    createdOn: new Date(),
    createdBy: userId,
    statusCode: DB_ROW_STATUS.ACTIVE, //1
  };

  const repository = digitalIdentityRepo.getRepository();
  if (repository) {
    const entity = repository.createEntity(digitalIdentity);
    insertedKey = await repository.save(entity);
  }

  return insertedKey;
};

    Conclusion

Now you have learned how to use Redis to set up ongoing digital identity monitoring and scoring in a microservices application. This is also called "dynamic digital identity monitoring." Dynamic digital identities are constantly updated based on the information available from each digital transaction. By analyzing these transactions, businesses can build a comprehensive and up-to-date digital identity that includes both static and dynamic elements. These identities can then be scored to determine the risk that they pose to the business.

    In addition to increasing security, digital identities can also improve the customer experience. By using the digital footprint left by a user, businesses can offer more personalized services and reduce friction in the authentication process.

    Digital identity systems are typically designed to be interoperable and scalable, allowing for seamless integration with various applications and platforms.

    Additional Resources

    - + \ No newline at end of file diff --git a/howtos/solutions/fraud-detection/transaction-risk-scoring/index.html b/howtos/solutions/fraud-detection/transaction-risk-scoring/index.html index 0a1cce13a5..2aff723a94 100644 --- a/howtos/solutions/fraud-detection/transaction-risk-scoring/index.html +++ b/howtos/solutions/fraud-detection/transaction-risk-scoring/index.html @@ -4,7 +4,7 @@ How to use Redis for Transaction risk scoring | The Home of Redis Developers - + @@ -14,7 +14,7 @@

    How to use Redis for Transaction risk scoring


Authors:
Prasan Kumar, Technical Solutions Developer at Redis
Will Johnston, Developer Growth Manager at Redis

    GITHUB CODE

Below is a command to clone the source code for the application used in this tutorial:

    git clone --branch v3.0.0 https://github.com/redis-developer/redis-microservices-ecommerce-solutions

What is transaction risk scoring?

    "Transaction risk scoring" is a method of leveraging data science, machine learning, and statistical analysis to continuously monitor transactions and assess the relative risk associated with each transaction. By comparing transactional data to models of known fraud, the risk score can be calculated, and the closer a transaction matches fraudulent behaviour, the higher the risk score.

    The score is typically based on a statistical analysis of historical transaction data to identify patterns and trends associated with fraudulent activity. The score can then be used to trigger alerts or to automatically decline transactions that exceed a certain risk threshold. It can also be used to trigger additional authentication steps for high-risk transactions. Additional steps might include a one-time password (OTP) sent via text, email, or biometric scan.

    tip

    Transaction risk scoring is often combined in a single system with other fraud detection methods, such as digital identity validation.

Why you should use Redis for transaction risk scoring

    A risk-based approach must be designed to create a frictionless flow and avoid slowing down the transaction experience for legitimate customers while simultaneously preventing fraud. If your risk-based approach is too strict, it will block legitimate transactions and frustrate customers. If it is too lenient, it will allow fraudulent transactions to go through.

    How to avoid false positives with rules engines

    Rules-based automated fraud detection systems operate on simple "yes or no" logic to determine whether a given transaction is likely to be fraudulent. An example of a rule would be "block all transactions over $500 from a risky region". With a simple binary decision like this, the system is likely to block a lot of genuine customers. Sophisticated fraudsters easily fool such systems, and the complex nature of fraud means that simple "yes or no" rules may not be enough to assess the risk of each transaction accurately.

More accurate risk scoring with AI/ML addresses these issues. Modern fraud detection systems use machine learning models trained on large volumes of different data sets, known as "features" (user profiles, transaction patterns, behavioural attributes, and more), to accurately identify fraudulent transactions. These models are designed to be flexible, so they can adapt to new types of fraud. For example, a neural network can examine suspicious activities, such as how many pages a customer browses before making an order, or whether they copy and paste information rather than typing it in manually, and flag the customer for further review.

The models use historical as well as the most recent data to create a risk profile for each customer. By analyzing past behaviour, it is possible to create a profile of what is normal for each customer. Any transactions that deviate from this profile can be flagged as suspicious, reducing the likelihood of false positives. The models also adapt very quickly to changes in normal behaviour and can rapidly identify patterns of fraudulent transactions.

    This is exactly where Redis Enterprise excels in transaction risk scoring.

    How to use Redis Enterprise for transaction risk scoring

    People use Redis Enterprise as the in-memory online feature store for online and real-time access to feature data as part of a transaction risk scoring system. By serving online features with low latency, Redis Enterprise enables the risk-scoring models to return results in real-time, thereby allowing the whole system to achieve high accuracy and instant response on approving legitimate online transactions.

    Another very common use for Redis Enterprise in transaction risk scoring is for transaction filters. A transaction filter can be implemented as a Bloom filter that stores information about user behaviours. It can answer questions like "Have we seen this user purchase at this merchant before?" Or, "Have we seen this user purchase at this merchant in the X to Y price range before?" Being a probabilistic data structure, Redis Bloom filters do, indeed, sacrifice some accuracy, but in return, they get a very low memory footprint and response time.

    tip

You might ask why not use a Redis Set to answer some of the questions above. Redis Sets are used to store unordered collections of unique strings (members). They are very efficient, with most operations taking O(1) time complexity. However, the SMEMBERS command is O(N), where N is the cardinality of the set; it can be very slow for large sets and would also take a lot of memory. This presents a problem both in single-instance storage and in geo-replication, since more data requires more time to move. This is why Redis Bloom filters are a better choice for transaction filters. Applications undergo millions of transactions every day, and Bloom filters maintain a speedy response time at scale.
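To make this concrete, here is a minimal node-redis sketch of such a transaction filter, assuming the RedisBloom module is available on the server; the key name, sizing, and the user:merchant encoding are illustrative assumptions.

import { createClient } from 'redis';

async function transactionFilterDemo() {
  const client = createClient({ url: 'redis://localhost:6379' });
  await client.connect();

  // Size the filter for ~10M entries at a 0.1% false-positive rate
  try {
    await client.bf.reserve('bf:user_merchant', 0.001, 10_000_000);
  } catch {
    // Filter already exists
  }

  // Record that user 42 purchased at merchant 1001
  await client.bf.add('bf:user_merchant', '42:1001');

  // "Have we seen this user purchase at this merchant before?"
  const seenBefore = await client.bf.exists('bf:user_merchant', '42:1001');
  console.log(seenBefore); // true (or a false positive, with probability <= 0.1%)

  await client.disconnect();
}

transactionFilterDemo().catch(console.error);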

    Transaction risk scoring in a microservices architecture for an e-commerce application

    The e-commerce microservices application discussed in the rest of this tutorial uses the following architecture:

    1. products service: handles querying products from the database and returning them to the frontend
    2. orders service: handles validating and creating orders
    3. order history service: handles querying a customer's order history
    4. payments service: handles processing orders for payment
    5. digital identity service: handles storing digital identity and calculating identity score
    6. api gateway: unifies services under a single endpoint
7. mongodb/postgresql: serves as the primary database, storing orders, order history, products, etc.
    8. redis: serves as the stream processor and caching database
    info

You don't need to use MongoDB/PostgreSQL as your primary database in the demo application; you can use other Prisma-supported databases as well. This is just an example.

    Transaction risk scoring checkout procedure

    When a user goes to checkout, the system needs to check the user's digital identity and profile to determine the risk of the transaction. The system can then decide whether to approve the transaction or to trigger additional authentication steps. The following diagram shows the flow of transaction risk scoring in the e-commerce application:

    Transaction risk scoring event flow with redis streams

    The following steps are performed in the checkout procedure:

    1. The customer adds an item to the cart and proceeds to checkout.
    2. The order service receives the checkout request and creates an order in the database.
3. The order service publishes a CALCULATE_IDENTITY_SCORE event to the TRANSACTIONS Redis stream.
    4. The identity service subscribes to the TRANSACTIONS Redis stream and receives the CALCULATE_IDENTITY_SCORE event.
    5. The identity service calculates the identity score for the user and publishes a CALCULATE_PROFILE_SCORE event to the TRANSACTIONS Redis stream.
    6. The profile service subscribes to the TRANSACTIONS Redis stream and receives the CALCULATE_PROFILE_SCORE event.
    7. The profile service calculates the profile score by checking the products in the shopping cart against a known profile for the customer.
8. The profile service publishes an ASSESS_RISK event to the TRANSACTIONS Redis stream.
    9. The order service subscribes to the TRANSACTIONS Redis stream and receives the ASSESS_RISK event.
    10. The order service determines if there is a likelihood of fraud based on the identity and profile scores. If there is a likelihood of fraud, the order service triggers additional authentication steps. If there is no likelihood of fraud, the order service approves the order and proceeds to process payments.

    E-commerce application frontend using Next.js and Tailwind

The e-commerce microservices application consists of a frontend, built using Next.js with TailwindCSS. The application backend uses Node.js. The data is stored in Redis and MongoDB/PostgreSQL using Prisma. Below you will find screenshots of the frontend of the e-commerce app:

    • Dashboard: Shows the list of products with search functionality

      redis microservices e-commerce app frontend products page

    • Shopping Cart: Add products to the cart, then check out using the "Buy Now" button redis microservices e-commerce app frontend shopping cart

    • Order history: Once an order is placed, the Orders link in the top navigation bar shows the order status and history

      redis microservices e-commerce app frontend order history page

    GITHUB CODE

Below is a command to clone the source code for the application used in this tutorial:

    git clone --branch v3.0.0 https://github.com/redis-developer/redis-microservices-ecommerce-solutions

Coding example for transaction risk scoring with Redis

    Now that you understand the steps involved in the checkout process for transaction risk scoring, let's look at the code for the order service and profile service to facilitate this process:

    note

    To see the code for the identity service check out the digital identity validation solution.

    Initiating the checkout process in the order service

    When the order service receives a checkout request, it creates an order in the database and publishes a CALCULATE_IDENTITY_SCORE event to the TRANSACTIONS Redis stream. The event contains information about the order as well as the customer, such as the browser fingerprint, IP address, and persona (profile). This data will be used during the transaction by the identity service and profile service to calculate the identity and profile scores. The order service also specifies the transaction pipeline, meaning it determines the order of events called so that the identity service and profile service do not need to be aware of each other. The order service ultimately owns the transaction. The sample code below shows the createOrder function in the order service. The code example below is highly simplified. For more detail please see the source code linked above:

const TransactionPipelines = {
  CHECKOUT: [
    TransactionStreamActions.CALCULATE_IDENTITY_SCORE,
    TransactionStreamActions.CALCULATE_PROFILE_SCORE,
    TransactionStreamActions.ASSESS_RISK,
    TransactionStreamActions.PROCESS_PAYMENT,
    TransactionStreamActions.PAYMENT_PROCESSED,
  ],
};

async function createOrder(
  order: IOrder,
  browserAgent: string,
  ipAddress: string,
  sessionId: string,
  sessionData: ISessionData,
) {
  order = await validateOrder(order);

  const userId = order.userId; // (assumption) in the full source, the user id comes from the session/order context

  const orderId = await addOrderToRedis(order);
  order.orderId = orderId;

  await addOrderToMongoDB(order);

  // Log order creation to the LOGS stream
  await streamLog({
    action: 'CREATE_ORDER',
    message: `[${REDIS_STREAMS.CONSUMERS.ORDERS}] Order created with id ${orderId} for the user ${userId}`,
    metadata: {
      userId: userId,
      persona: sessionData.persona,
      sessionId: sessionId,
    },
  });

  let orderAmount = 0;
  order.products?.forEach((product) => {
    orderAmount += product.productPrice * product.qty;
  });

  const orderDetails: IOrderDetails = {
    orderId: orderId,
    orderAmount: orderAmount.toFixed(2),
    userId: userId,
    sessionId: sessionId,
    orderStatus: order.orderStatusCode,
    products: order.products,
  };

  // Initiate the transaction by adding the order details to the transaction stream and sending the first event
  await addMessageToTransactionStream({
    action: TransactionPipelines.CHECKOUT[0],
    logMessage: `[${REDIS_STREAMS.CONSUMERS.IDENTITY}] Digital identity to be validated/ scored for the user ${userId}`,
    userId: userId,
    persona: sessionData.persona,
    sessionId: sessionId,
    orderDetails: orderDetails ? JSON.stringify(orderDetails) : '',
    transactionPipeline: JSON.stringify(TransactionPipelines.CHECKOUT),

    identityBrowserAgent: browserAgent,
    identityIpAddress: ipAddress,
  });

  return orderId;
}

    Let's look at the addMessageToTransactionStream function in more detail:

async function addMessageToStream(message, streamKeyName) {
  try {
    const nodeRedisClient = getNodeRedisClient();
    if (nodeRedisClient && message && streamKeyName) {
      const id = '*'; //* = auto generate
      await nodeRedisClient.xAdd(streamKeyName, id, message);
    }
  } catch (err) {
    LoggerCls.error('addMessageToStream error !', err);
    LoggerCls.error(streamKeyName, message);
  }
}

async function addMessageToTransactionStream(
  message: ITransactionStreamMessage,
) {
  if (message) {
    const streamKeyName = REDIS_STREAMS.STREAMS.TRANSACTIONS;
    await addMessageToStream(message, streamKeyName);
  }
}

    Checking an order against a known profile in the profile service

As you can see above, the transaction pipeline follows CALCULATE_IDENTITY_SCORE -> CALCULATE_PROFILE_SCORE -> ASSESS_RISK. Let's now look at how the profile service subscribes to the TRANSACTIONS Redis stream and receives the CALCULATE_PROFILE_SCORE event. When the profile service starts, it subscribes to the TRANSACTIONS Redis stream and listens for events.

function listen() {
  listenToStreams({
    streams: [
      {
        streamKeyName: REDIS_STREAMS.STREAMS.TRANSACTIONS,
        eventHandlers: {
          [TransactionStreamActions.CALCULATE_PROFILE_SCORE]:
            calculateProfileScore,
        },
      },
    ],
    groupName: REDIS_STREAMS.GROUPS.PROFILE,
    consumerName: REDIS_STREAMS.CONSUMERS.PROFILE,
  });
}

A highly simplified version of the listenToStreams method looks as follows. It takes in a list of streams with an associated object that maps events on the stream to a callback for processing the events. It also takes a stream group and a consumer name. Then it handles the subscription to the stream and calls the appropriate method when an event comes in:

interface ListenStreamOptions {
  streams: {
    streamKeyName: string;
    eventHandlers: {
      [messageAction: string]: IMessageHandler;
    };
  }[];
  groupName: string;
  consumerName: string;
  maxNoOfEntriesToReadAtTime?: number;
}

const listenToStreams = async (options: ListenStreamOptions) => {
  /*
    (A) create consumer group for the stream
    (B) read set of messages from the stream
    (C) process all messages received
    (D) trigger appropriate action callback for each message
    (E) acknowledge individual messages after processing
  */

  const nodeRedisClient = getNodeRedisClient();
  if (nodeRedisClient) {
    const streams = options.streams;
    const groupName = options.groupName;
    const consumerName = options.consumerName;
    const readMaxCount = options.maxNoOfEntriesToReadAtTime || 100;
    const idInitialPosition = '0'; //0 = start, $ = end or any specific id
    const streamKeyIdArr: {
      key: string;
      id: string;
    }[] = [];

    streams.map(async (stream) => {
      LoggerCls.info(
        `Creating consumer group ${groupName} in stream ${stream.streamKeyName}`,
      );

      try {
        // (A) create consumer group for the stream
        await nodeRedisClient.xGroupCreate(
          stream.streamKeyName,
          groupName,
          idInitialPosition,
          {
            MKSTREAM: true,
          },
        );
      } catch (err) {
        LoggerCls.error(
          `Consumer group ${groupName} already exists in stream ${stream.streamKeyName}!`,
        ); //, err
      }

      streamKeyIdArr.push({
        key: stream.streamKeyName,
        id: '>', // Next entry ID that no consumer in this group has read
      });
    });

    LoggerCls.info(`Starting consumer ${consumerName}.`);

    while (true) {
      try {
        // (B) read set of messages from different streams
        const dataArr = await nodeRedisClient.xReadGroup(
          commandOptions({
            isolated: true,
          }),
          groupName,
          consumerName,
          //can specify multiple streams in array [{key, id}]
          streamKeyIdArr,
          {
            COUNT: readMaxCount, // Read n entries at a time
            BLOCK: 5, // block for 5 ms if there are no new entries
          },
        );

        // dataArr = [
        //   {
        //     name: 'streamName',
        //     messages: [
        //       {
        //         id: '1642088708425-0',
        //         message: {
        //           key1: 'value1',
        //         },
        //       },
        //     ],
        //   },
        // ];

        //(C) process all messages received
        if (dataArr && dataArr.length) {
          for (let data of dataArr) {
            for (let messageItem of data.messages) {
              const streamKeyName = data.name;

              const stream = streams.find(
                (s) => s.streamKeyName == streamKeyName,
              );

              if (stream && messageItem.message) {
                const streamEventHandlers = stream.eventHandlers;
                const messageAction = messageItem.message.action;
                const messageHandler = streamEventHandlers[messageAction];

                if (messageHandler) {
                  // (D) trigger appropriate action callback for each message
                  await messageHandler(messageItem.message, messageItem.id);
                }
                //(E) acknowledge individual messages after processing
                nodeRedisClient.xAck(streamKeyName, groupName, messageItem.id);
              }
            }
          }
        } else {
          // LoggerCls.info('No new stream entries.');
        }
      } catch (err) {
        LoggerCls.error('xReadGroup error !', err);
      }
    }
  }
};

The calculateProfileScore handler is called when a new event comes in. It validates the event, making sure it is the CALCULATE_PROFILE_SCORE event, and if it is, it calculates the profile score. It uses a Redis Bloom filter to check if the user has ordered a similar set of products before. It uses a pre-defined persona for the purposes of this demo, but in reality you would build a profile of the user over time. In the demo application, each product has a "master category" and "subcategory". Bloom filters are set up for the master categories as well as the master+subcategory combinations. The scoring logic is highlighted below:

async function calculateProfileScore(
  message: ITransactionStreamMessage,
  messageId,
) {
  LoggerCls.info(`Incoming message in Profile Service ${messageId}`);
  if (!(message.orderDetails && message.persona)) {
    return false;
  }

  await streamLog({
    action: TransactionStreamActions.CALCULATE_PROFILE_SCORE,
    message: `[${REDIS_STREAMS.CONSUMERS.PROFILE}] Calculating profile score for the user ${message.userId}`,
    metadata: message,
  });

  // check profile score
  const { products }: IOrderDetails = JSON.parse(message.orderDetails);
  const persona = message.persona.toLowerCase();
  let score = 0;
  const nodeRedisClient = getNodeRedisClient();

  if (!nodeRedisClient) {
    return false;
  }

  const categories = products.reduce((cat, product) => {
    const masterCategory = product.productData?.masterCategory?.typeName;
    const subCategory = product.productData?.subCategory?.typeName;

    if (masterCategory) {
      cat[`${masterCategory}`.toLowerCase()] = true;

      if (subCategory) {
        cat[`${masterCategory}:${subCategory}`.toLowerCase()] = true;
      }
    }

    return cat;
  }, {} as Record<string, boolean>);

  const categoryKeys = Object.keys(categories);
  const checks = categoryKeys.length;

  LoggerCls.info(
    `Checking ${checks} categories: ${JSON.stringify(categoryKeys)}`,
  );

  await Promise.all(
    categoryKeys.map(async (category) => {
      const exists = await nodeRedisClient.bf.exists(
        `bfprofile:${category}`.toLowerCase(),
        persona,
      );

      if (exists) {
        score += 1;
      }
    }),
  );

  LoggerCls.info(`After ${checks} checks, total score is ${score}`);
  score = score / (checks || 1);

  await streamLog({
    action: TransactionStreamActions.CALCULATE_PROFILE_SCORE,
    message: `[${REDIS_STREAMS.CONSUMERS.PROFILE}] Profile score for the user ${message.userId} is ${score}`,
    metadata: message,
  });

  await nextTransactionStep({
    ...message,
    logMessage: `[${REDIS_STREAMS.CONSUMERS.PROFILE}] Requesting next step in transaction risk scoring for the user ${message.userId}`,
    profileScore: `${score}`,
  });

  return true;
}
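As an aside, here is a minimal sketch of how those persona Bloom filters might be seeded. The bfprofile: key prefix and lowercase persona values match the lookup code above, but the seedPersonaFilters helper and its inputs are hypothetical, not part of the demo's shown code:

import { createClient } from 'redis';

// Hypothetical seeding step: record which category and category:subcategory
// combinations a persona has ordered from before.
async function seedPersonaFilters(persona: string, categories: string[]) {
  const client = createClient(); // assumes a Redis instance with RedisBloom
  await client.connect();

  for (const category of categories) {
    // BF.ADD creates the filter on first use and adds the persona to it
    await client.bf.add(`bfprofile:${category}`.toLowerCase(), persona);
  }

  await client.quit();
}

// e.g. seedPersonaFilters('somePersona', ['accessories', 'accessories:watches']);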

    The nextTransactionStep method is called after the profile score has been calculated. It uses the transactionPipeline setup in the order service to publish the ASSESS_RISK event. The logic for this is below:

async function nextTransactionStep(message: ITransactionStreamMessage) {
  const transactionPipeline: TransactionStreamActions[] = JSON.parse(
    message.transactionPipeline,
  );
  transactionPipeline.shift();

  if (transactionPipeline.length <= 0) {
    return;
  }

  const streamKeyName = REDIS_STREAMS.STREAMS.TRANSACTIONS;
  await addMessageToStream(
    {
      ...message,
      action: transactionPipeline[0],
      transactionPipeline: JSON.stringify(transactionPipeline),
    },
    streamKeyName,
  );
}

In short, the nextTransactionStep method pops the current event off the transactionPipeline, then publishes the next event in the pipeline, which in this case is the ASSESS_RISK event.

    Finalizing the order with transaction risk scoring in the order service

    The order service is responsible for finalizing the order prior to payment. It listens to the ASSESS_RISK event, and then checks the calculated scores to determine if there is potential fraud.

    note

    The demo application keeps things very simple, and it only sets a "potentialFraud" flag on the order. In the real world, you need to choose not only what scoring makes sense for your application, but also how to handle potential fraud. For example, you may want to request additional information from the customer such as a one-time password. You may also want to send the order to a human for review. It depends on your business and your risk appetite and mitigation strategy.

    The logic to process and finalize orders in the order service is below:

async function checkOrderRiskScore(message: ITransactionStreamMessage) {
  LoggerCls.info(`Incoming message in Order Service`);
  if (!message.orderDetails) {
    return false;
  }

  const orderDetails: IOrderDetails = JSON.parse(message.orderDetails);

  if (!(orderDetails.orderId && orderDetails.userId)) {
    return false;
  }

  LoggerCls.info(
    `Transaction risk scoring for user ${message.userId} and order ${orderDetails.orderId}`,
  );

  const { identityScore, profileScore } = message;
  const identityScoreNumber = Number(identityScore);
  const profileScoreNumber = Number(profileScore);
  let potentialFraud = false;

  if (identityScoreNumber <= 0 || profileScoreNumber < 0.5) {
    LoggerCls.info(
      `Transaction risk score is too low for user ${message.userId} and order ${orderDetails.orderId}`,
    );

    await streamLog({
      action: TransactionStreamActions.ASSESS_RISK,
      message: `[${REDIS_STREAMS.CONSUMERS.ORDERS}] Order failed fraud checks for orderId ${orderDetails.orderId} and user ${message.userId}`,
      metadata: message,
    });

    potentialFraud = true;
  }

  orderDetails.orderStatus = ORDER_STATUS.PENDING;
  orderDetails.potentialFraud = potentialFraud;

  updateOrderStatusInRedis(orderDetails);
  /**
   * In a real-world scenario, you could use RDI, Redis Gears, or another
   * database-to-database sync strategy for the Redis -> system-of-record
   * data transfer. To keep it simple, this demo adds the data to MongoDB
   * manually in the same service.
   */
  updateOrderStatusInMongoDB(orderDetails);

  message.orderDetails = JSON.stringify(orderDetails);

  await streamLog({
    action: TransactionStreamActions.ASSESS_RISK,
    message: `[${REDIS_STREAMS.CONSUMERS.ORDERS}] Order status updated after fraud checks for orderId ${orderDetails.orderId} and user ${message.userId}`,
    metadata: message,
  });

  await nextTransactionStep(message);

  return true;
}

    Visualizing the transaction risk scoring data and event pipeline in RedisInsight

    tip

RedisInsight is the free Redis GUI for viewing data in Redis. You can download it from the Redis website.

    Now that you understand some of the code involved in processing transactions, let's take a look at the data in RedisInsight. First let's look at the TRANSACTION_STREAM key, which is where the stream data is held for the checkout transaction:

    RedisInsight transaction risk scoring transaction stream
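If you'd rather peek at the same entries from code instead of the RedisInsight UI, a quick sketch along these lines should work, assuming the node-redis wrapper used throughout the services (the peekTransactionStream helper name is ours):

import { getNodeRedisClient } from '../utils/redis/redis-wrapper';

async function peekTransactionStream() {
  const nodeRedisClient = getNodeRedisClient();
  if (!nodeRedisClient) {
    return;
  }

  // XRANGE TRANSACTION_STREAM - + : read entries oldest-first (here, the first 5)
  const entries = await nodeRedisClient.xRange('TRANSACTION_STREAM', '-', '+', {
    COUNT: 5,
  });

  for (const entry of entries) {
    // each entry carries the action column you see in RedisInsight
    console.log(entry.id, entry.message.action);
  }
}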

    You can see the action column shows the transaction pipeline discussed earlier. Another thing to look at in RedisInsight is the Bloom filters:

    RedisInsight transaction risk scoring bloom filters

    These filters are pre-populated in the demo application based on a feature store. Redis is also storing the features, which in this case is the profiles of each of the personas. Below is an example of one of the profile features:

    RedisInsight transaction risk scoring feature store

    Conclusion

    In this post, you learned how to use Redis Streams to build a transaction risk scoring pipeline. You also learned how to use Redis Enterprise as a feature store and Redis Bloom filters to calculate a profile score. Every application is unique, so this tutorial is meant to be a starting point for you to build your own transaction risk scoring pipeline.

    Additional resources


    Solution Tutorials

    This page provides a listing of dozens of popular app solution tutorials from Redis.

    Microservices

    Learn how to easily build, test and deploy code for common microservice and caching design patterns across different industries using Redis.

    How to build an e-commerce app using Redis with the CQRS Pattern
    Microservices Communication with Redis streams
    How to use Redis for Query Caching
    How to use Redis for API Gateway Caching

    Fraud detection

    How to Handle Digital Identity Validation Using Redis
    How to use Redis for Transaction risk scoring

    Caching architecture

    How to use Redis for Write-behind Caching
    How to use Redis for Write through caching strategy
    How to use Redis for Cache Prefetching Strategy
    How to use Redis for Cache-aside

    Real-time Inventory

    Available to Promise in Real-time Inventory Using Redis
    Real-time Local Inventory Search Using Redis

    Mobile Banking

    Mobile Banking Authentication and Session Storage Using Redis
    Mobile Banking Account Dashboard Using Redis

    Vectors

    Getting Started with Vector Search Using Redis in NodeJS

    How to use Redis for API Gateway Caching


    Profile picture for Prasan Kumar
    Author:
    Prasan Kumar, Technical Solutions Developer at Redis
    Profile picture for Will Johnston
    Author:
    Will Johnston, Developer Growth Manager at Redis

    GITHUB CODE

Below is the command to clone the source code for the application used in this tutorial:

    git clone --branch v4.2.0 https://github.com/redis-developer/redis-microservices-ecommerce-solutions

    What is API gateway caching?

    So you're building a microservices application. But you find yourself struggling with ways to handle authentication that let you reuse code and maximize performance. Typically for authentication you might use sessions, OAuth, authorization tokens, etc. For the purposes of this tutorial, let's assume we're using an authorization token. In a monolithic application, authentication is pretty straightforward:

    When a request comes in:

    1. Decode the Authorization header.
    2. Validate the credentials.
    3. Store the session information on the request object or cache for further use down the line by the application.

    However, you might be puzzled by how to do this with microservices. Ordinarily, in a microservices application an API gateway serves as the single entry point for clients, which routes traffic to the appropriate services. Depending on the nature of the request, those services may or may not require a user to be authenticated. You might think it's a good idea to handle authentication in each respective service.

    While this works, you end up with a fair amount of duplicated code. Plus, it's difficult to understand when and where slowdowns happen and to scale services appropriately, because you repeat some of the same work in each service. A more effective way to handle authentication is to deal with it at the API gateway layer, and then pass the session information down to each service.

    Once you decide to handle authentication at the API gateway layer, you must decide where to store sessions.

Imagine you're building an e-commerce application that uses MongoDB or any relational database as the primary data store. You could store sessions in the primary database, but think about how many times the application needs to hit it to retrieve session information. If you have millions of customers, you don't want to go to the database for every single request made to the API.

    This is where Redis comes in.

    Why you should use Redis for API gateway caching

    Redis is an in-memory datastore, which – among other things – makes it a perfect tool for caching session data. Redis allows you to reduce the load on a primary database while speeding up database reads. The rest of this tutorial covers how to accomplish this in the context of an e-commerce application.

    Microservices architecture for an e-commerce application

    The e-commerce microservices application discussed in the rest of this tutorial uses the following architecture:

    1. products service: handles querying products from the database and returning them to the frontend
    2. orders service: handles validating and creating orders
    3. order history service: handles querying a customer's order history
    4. payments service: handles processing orders for payment
    5. digital identity service: handles storing digital identity and calculating identity score
    6. api gateway: unifies services under a single endpoint
7. mongodb/postgresql: serves as the primary database, storing orders, order history, products, etc.
    8. redis: serves as the stream processor and caching database
    info

You don't need to use MongoDB or PostgreSQL as the primary database in the demo application; you can use other Prisma-supported databases as well. This is just an example.

    The diagram illustrates how the API gateway uses Redis as a cache for session information. The API gateway gets the session from Redis and then passes it on to each microservice. This provides an easy way to handle sessions in a single place, and to permeate them throughout the rest of the microservices.

    API gateway caching with Redis architecture diagram

    tip

    Use a Redis Enterprise Cluster to get the benefit of linear scaling to ensure API calls perform under peak loads. That also provides 99.999% uptime and Active-Active geo-distribution, which prevents loss of authentication and session data.

    E-commerce application frontend using Next.js and Tailwind

The e-commerce microservices application consists of a frontend, built using Next.js with TailwindCSS. The application backend uses Node.js. The data is stored in Redis and MongoDB/PostgreSQL using Prisma. Below you will find screenshots of the frontend of the e-commerce app:

    • Dashboard: Shows the list of products with search functionality

      redis microservices e-commerce app frontend products page

    • Shopping Cart: Add products to the cart, then check out using the "Buy Now" button redis microservices e-commerce app frontend shopping cart

    • Order history: Once an order is placed, the Orders link in the top navigation bar shows the order status and history

      redis microservices e-commerce app frontend order history page

    GITHUB CODE

Below is the command to clone the source code for the application used in this tutorial:

    git clone --branch v4.2.0 https://github.com/redis-developer/redis-microservices-ecommerce-solutions

    API gateway caching in a microservices application with Redis

    What's nice about a microservice architecture is that each service is set up so it can scale independently. Now, seeing as how each service might require authentication, you likely want to obtain session information for most requests. Therefore, it makes sense to use the API gateway to cache and retrieve session information and to subsequently pass the information on to each service. Let's see how you might accomplish this.

    In our sample application, all requests are routed through the API gateway. We use Express to set up the API gateway, and the Authorization header to pass the authorization token from the frontend to the API. For every request, the API gateway gets the authorization token and looks it up in Redis. Then it passes it along to the correct microservice.

    This code validates the session:

import {
  createProxyMiddleware,
  responseInterceptor,
} from 'http-proxy-middleware';

//-----
const app: Express = express();

app.use(cors());
app.use(async (req, res, next) => {
  const authorizationHeader = req.header('Authorization');
  const sessionInfo = await getSessionInfo(authorizationHeader); //---- (1)

  // add session info to the request
  if (sessionInfo?.sessionData && sessionInfo?.sessionId) {
    req.session = sessionInfo?.sessionData;
    req.sessionId = sessionInfo?.sessionId;
  }
  next();
});

app.use(
  '/orders',
  createProxyMiddleware({
    // http://localhost:3000/orders/bar -> http://localhost:3001/orders/bar
    target: 'http://localhost:3001',
    changeOrigin: true,
    selfHandleResponse: true,
    onProxyReq(proxyReq, req, res) {
      // pass session info to the microservice
      proxyReq.setHeader('x-session', req.session);
    },
    onProxyRes: applyAuthToResponse, //---- (2)
  }),
);

app.use(
  '/orderHistory',
  createProxyMiddleware({
    target: 'http://localhost:3002',
    changeOrigin: true,
    selfHandleResponse: true,
    onProxyReq(proxyReq, req, res) {
      // pass session info to the microservice
      proxyReq.setHeader('x-session', req.session);
    },
    onProxyRes: applyAuthToResponse, //---- (2)
  }),
);
//-----

const getSessionInfo = async (authHeader?: string) => {
  // (For demo purposes only) random userId and sessionId values are created the
  // first time; the userId is then fetched against that sessionId for future requests
  let sessionId = '';
  let sessionData: string | null = '';

  if (!!authHeader) {
    sessionId = authHeader.split(/\s/)[1];
  } else {
    sessionId = 'SES_' + randomUUID(); // generate a random new sessionId
  }

  const nodeRedisClient = getNodeRedisClient();
  if (nodeRedisClient) {
    const exists = await nodeRedisClient.exists(sessionId);
    if (!exists) {
      await nodeRedisClient.set(
        sessionId,
        JSON.stringify({ userId: 'USR_' + randomUUID() }),
      ); // generate a random new userId
    }
    sessionData = await nodeRedisClient.get(sessionId);
  }

  return {
    sessionId: sessionId,
    sessionData: sessionData,
  };
};

const applyAuthToResponse = responseInterceptor(
  // adding sessionId to the response so that the frontend can store it for future requests
  async (responseBuffer, proxyRes, req, res) => {
    // detect json responses
    if (
      !!proxyRes.headers['content-type'] &&
      proxyRes.headers['content-type'].includes('application/json')
    ) {
      let data = JSON.parse(responseBuffer.toString('utf8'));

      // manipulate JSON data here
      if (!!(req as Request).sessionId) {
        data = Object.assign({}, data, { auth: (req as Request).sessionId });
      }

      // return manipulated JSON
      return JSON.stringify(data);
    }

    // return other content-types as-is
    return responseBuffer;
  },
);
    info

    This example is not meant to represent the best way to handle authentication. Instead, it illustrates what you might do with respect to Redis. You will likely have a different setup for authentication, but the concept of storing a session in Redis is similar.

In the code above, we check for the Authorization header; if it's missing, we create a new session and store it in Redis. Then we retrieve the session from Redis. Further down the line, we attach the session to the x-session header prior to calling the orders service.

    Now let's see how the orders service receives the session.

router.post(API_NAMES.CREATE_ORDER, async (req: Request, res: Response) => {
  const body = req.body;
  const result: IApiResponseBody = {
    data: null,
    error: null,
  };

  const sessionData = req.header('x-session');
  const userId = sessionData ? JSON.parse(sessionData).userId : '';
  //...
});

    The highlighted line above shows how to pull the session out of the x-session header and get the userId.

Ready to use Redis for API gateway caching?

    That's all there is to it! You now know how to use Redis for API gateway caching. It's not complicated to get started, but this simple practice can help you scale as you build out microservices.

    To learn more about Redis, check out the additional resources below:

    Additional resources

How to use Redis for Query Caching

Query caching is the technique you need to speed up database queries by using different caching methods while keeping costs down! Imagine that you built an e-commerce application. It started small but is growing fast. By now, you have an extensive product catalog and millions of customers.

That's good for business, but a hardship for technology. Your queries to the primary database (MongoDB/PostgreSQL) are beginning to slow down, even though you've already attempted to optimize them. You can squeeze out a little extra performance, but it isn't enough to satisfy your customers.

    Why you should use Redis for query caching

    Redis is an in-memory datastore, best known for caching. Redis allows you to reduce the load on a primary database while speeding up database reads.

    With any e-commerce application, there is one specific type of query that is most often requested. If you guessed that it’s the product search query, you’re correct!

To improve product search in an e-commerce application, you can implement one of the following caching patterns:

    • Cache prefetching: An entire product catalog can be pre-cached in Redis, and the application can perform any product query on Redis similar to the primary database.
    • Cache-aside pattern: Redis is filled on demand, based on whatever search parameters are requested by the frontend.
    tip

If you use Redis Enterprise, cache-aside is easier due to its support for JSON and search. You also get additional features such as real-time performance, high scalability, resiliency, and fault tolerance. You can also call upon high-availability features such as Active-Active geo-redundancy.

    This tutorial focuses on the cache-aside pattern. The goal of this design pattern is to set up optimal caching (load-as-you-go) for better read operations. With caching, you might be familiar with a "cache miss," where you do not find data in the cache, and a "cache hit," where you can find data in the cache. Let's look at how the cache-aside pattern works with Redis for both a "cache miss" and a "cache hit."

    Cache-aside with Redis (cache miss)

    Cache miss when using the cache-aside pattern with Redis

    This diagram illustrates the steps taken in the cache-aside pattern when there is a "cache miss." To understand how this works, consider the following process:

    1. An application requests data from the backend.
    2. The backend checks to find out if the data is available in Redis.
    3. Data is not found (a cache miss), so the data is fetched from the database.
    4. The data returned from the database is subsequently stored in Redis.
    5. The data is then returned to the application.

    Cache-aside with Redis (cache hit)

    Now that you have seen what a "cache miss" looks like, let's cover a "cache hit." Here is the same diagram, but with the "cache hit" steps highlighted in green.

    Cache hit when using the cache-aside pattern with Redis
    1. An application requests data from the backend.
    2. The backend checks to find out if the data is available in Redis.
    3. The data is then returned to the application.
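Before diving into the application code (shown later in this tutorial), here is a minimal, generic sketch of the two flows above. fetchProductsFromDb and the key naming are placeholders, not the demo's actual implementation:

import { createClient } from 'redis';

const redis = createClient();
await redis.connect(); // assumes an ESM module with top-level await

// hypothetical stand-in for a real primary-database query
async function fetchProductsFromDb(filterJson: string): Promise<unknown[]> {
  return [];
}

async function getProductsCacheAside(filterJson: string) {
  const cacheKey = `products:${filterJson}`;

  // check whether the data is already in Redis
  const cached = await redis.get(cacheKey);
  if (cached) {
    return JSON.parse(cached); // cache hit: return straight from Redis
  }

  // cache miss: fetch from the primary database...
  const data = await fetchProductsFromDb(filterJson);

  // ...store the result in Redis for subsequent requests...
  await redis.set(cacheKey, JSON.stringify(data), { EX: 60 });

  // ...and return the data to the application
  return data;
}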

    The cache-aside pattern is useful when you need to:

    1. Query data frequently: When you have a large volume of reads (as is the case in an e-commerce application), the cache-aside pattern gives you an immediate performance gain for subsequent data requests.
    2. Fill the cache on demand: The cache-aside pattern fills the cache as data is requested rather than pre-caching, thus saving on space and cost. This is useful when it isn't clear what data will need to be cached.
    3. Be cost-conscious: Since cache size is directly related to the cost of cache storage in the cloud, the smaller the size, the less you pay.
    tip

    If you use Redis Enterprise and a database that uses a JDBC driver, you can take advantage of Redis Smart Cache, which lets you add caching to an application without changing the code. Click here to learn more!

    Microservices architecture for an e-commerce application

    The e-commerce microservices application discussed in the rest of this tutorial uses the following architecture:

    1. products service: handles querying products from the database and returning them to the frontend
    2. orders service: handles validating and creating orders
    3. order history service: handles querying a customer's order history
    4. payments service: handles processing orders for payment
    5. digital identity service: handles storing digital identity and calculating identity score
    6. api gateway: unifies services under a single endpoint
7. mongodb/postgresql: serves as the primary database, storing orders, order history, products, etc.
    8. redis: serves as the stream processor and caching database
    info

You don't need to use MongoDB or PostgreSQL as the primary database in the demo application; you can use other Prisma-supported databases as well. This is just an example.

    E-commerce application frontend using Next.js and Tailwind

The e-commerce microservices application consists of a frontend, built using Next.js with TailwindCSS. The application backend uses Node.js. The data is stored in Redis and MongoDB/PostgreSQL using Prisma. Below you will find screenshots of the frontend of the e-commerce app:

    • Dashboard: Shows the list of products with search functionality

      redis microservices e-commerce app frontend products page

    • Shopping Cart: Add products to the cart, then check out using the "Buy Now" button redis microservices e-commerce app frontend shopping cart

    • Order history: Once an order is placed, the Orders link in the top navigation bar shows the order status and history

      redis microservices e-commerce app frontend order history page

    GITHUB CODE

Below is the command to clone the source code for the application used in this tutorial:

    git clone --branch v4.2.0 https://github.com/redis-developer/redis-microservices-ecommerce-solutions

Caching in a microservices application with Redis and a primary database (MongoDB/PostgreSQL)

    In our sample application, the products service publishes an API for filtering products. Here's what a call to the API looks like:

    Get products by filter request

    docs/api/get-products-by-filter.md
// POST http://localhost:3000/products/getProductsByFilter
{
  "productDisplayName": "puma"
}

    Get products by filter response (cache miss)

{
  "data": [
    {
      "productId": "11000",
      "price": 3995,
      "productDisplayName": "Puma Men Slick 3HD Yellow Black Watches",
      "variantName": "Slick 3HD Yellow",
      "brandName": "Puma",
      "ageGroup": "Adults-Men",
      "gender": "Men",
      "displayCategories": "Accessories",
      "masterCategory_typeName": "Accessories",
      "subCategory_typeName": "Watches",
      "styleImages_default_imageURL": "http://host.docker.internal:8080/images/11000.jpg",
      "productDescriptors_description_value": "<p style=\"text-align: justify;\">Stylish and comfortable, ...",
      "createdOn": "2023-07-13T14:07:38.020Z",
      "createdBy": "ADMIN",
      "lastUpdatedOn": "2023-07-13T14:07:38.020Z",
      "lastUpdatedBy": null,
      "statusCode": 1
    }
    //...
  ],
  "error": null,
  "isFromCache": false
}

    Get products by filter response (cache hit)

{
  "data": [
    //...same data as above
  ],
  "error": null,
  "isFromCache": true // the data now comes from the cache rather than the DB
}

Implementing cache-aside with Redis and a primary database (MongoDB/PostgreSQL)

The following code shows the function used to search for products in the primary database:

    server/src/services/products/src/service-impl.ts
async function getProductsByFilter(productFilter: Product) {
  const prisma = getPrismaClient();

  const whereQuery: Prisma.ProductWhereInput = {
    statusCode: DB_ROW_STATUS.ACTIVE,
  };

  if (productFilter && productFilter.productDisplayName) {
    whereQuery.productDisplayName = {
      contains: productFilter.productDisplayName,
      mode: 'insensitive',
    };
  }

  const products: Product[] = await prisma.product.findMany({
    where: whereQuery,
  });

  return products;
}

You simply make a call to the primary database (MongoDB/PostgreSQL) to find products based on a filter on the product's displayName property. You can match against multiple columns for better fuzzy searching, but we simplified it for the purposes of this tutorial; a hypothetical multi-column variant is sketched below.
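For example, a multi-column variant of the where clause might look something like this. buildMultiColumnWhere is a hypothetical helper, brandName is one of the fields visible in the API responses above, and DB_ROW_STATUS comes from the tutorial's code:

import { Prisma } from '@prisma/client';

// hypothetical variant of the where clause from getProductsByFilter
function buildMultiColumnWhere(searchText: string): Prisma.ProductWhereInput {
  const match = { contains: searchText, mode: 'insensitive' as const };

  return {
    statusCode: DB_ROW_STATUS.ACTIVE, // from the tutorial's constants
    // match the search text against any of several columns
    OR: [{ productDisplayName: match }, { brandName: match }],
  };
}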

Using the primary database directly without Redis works for a while, but eventually it slows down. That's why you might use Redis: to speed things up. The cache-aside pattern helps you balance performance with cost.

    The basic decision tree for cache-aside is as follows.

    When the frontend requests products:

    1. Form a hash with the contents of the request (i.e., the search parameters).
    2. Check Redis to see if a value exists for the hash.
    3. Is there a cache hit? If data is found for the hash, it is returned; the process stops here.
4. Is there a cache miss? When data is not found, it is read out of the primary database and subsequently stored in Redis prior to being returned.

    Here’s the code used to implement the decision tree:

    server/src/services/products/src/routes.ts
const getHashKey = (_filter: Document) => {
  let retKey = '';
  if (_filter) {
    const text = JSON.stringify(_filter);
    retKey = crypto.createHash('sha256').update(text).digest('hex');
  }
  return 'CACHE_ASIDE_' + retKey;
};

router.post(API.GET_PRODUCTS_BY_FILTER, async (req: Request, res: Response) => {
  const body = req.body;
  const result: IApiResponseBody = {
    data: null,
    error: null,
  };
  // using node-redis
  const redis = getNodeRedisClient();

  // get data from redis
  const hashKey = getHashKey(req.body);
  const cachedData = await redis.get(hashKey);
  const docArr = cachedData ? JSON.parse(cachedData) : [];

  if (docArr && docArr.length) {
    result.data = docArr;
    result.isFromCache = true;
  } else {
    // get data from the primary database
    const dbData = await getProductsByFilter(body); // method shown earlier

    if (body && body.productDisplayName && dbData.length) {
      // set data in redis (no need to wait)
      redis.set(hashKey, JSON.stringify(dbData), {
        EX: 60, // cache expiration in seconds
      });
    }

    result.data = dbData;
  }

  res.send(result);
});
    tip

    You need to decide what expiry or time to live (TTL) works best for your particular use case.

    Ready to use Redis for query caching?

    You now know how to use Redis for caching with one of the most common caching patterns (cache-aside). It's possible to incrementally adopt Redis wherever needed with different strategies/patterns. For more resources on the topic of microservices, check out the links below:

    Additional resources


    microservices-arch-with-redis-old

    The e-commerce microservices application discussed in the rest of this tutorial uses the following architecture:

    1. products service: handles querying products from the database and returning them to the frontend
    2. orders service: handles validating and creating orders
    3. order history service: handles querying a customer's order history
    4. payments service: handles processing orders for payment
    5. digital identity service: handles storing digital identity and calculating identity score
    6. api gateway: unifies services under a single endpoint
    7. mongodb: serves as the primary database, storing orders, order history, products, etc.
    8. redis: serves as the stream processor and caching database

    microservices-arch-with-redis

    The e-commerce microservices application discussed in the rest of this tutorial uses the following architecture:

    1. products service: handles querying products from the database and returning them to the frontend
    2. orders service: handles validating and creating orders
    3. order history service: handles querying a customer's order history
    4. payments service: handles processing orders for payment
    5. digital identity service: handles storing digital identity and calculating identity score
    6. api gateway: unifies services under a single endpoint
7. mongodb/postgresql: serves as the primary database, storing orders, order history, products, etc.
    8. redis: serves as the stream processor and caching database
    info

You don't need to use MongoDB or PostgreSQL as the primary database in the demo application; you can use other Prisma-supported databases as well. This is just an example.


    microservices-arch

    You eventually land on the following architecture:

    1. products service: handles querying products from the database and returning them to the frontend
    2. orders service: handles validating and creating orders
    3. order history service: handles querying a customer's order history
    4. payments service: handles processing orders for payment
    5. api gateway: unifies the services under a single endpoint
6. mongodb/postgresql: serves as the write-optimized database for storing orders, order history, products, etc.
    info

You don't need to use MongoDB or PostgreSQL as the write-optimized database in the demo application; you can use other Prisma-supported databases as well. This is just an example.


    microservices-ecommerce-old

    The e-commerce microservices application consists of a frontend, built using Next.js with TailwindCSS. The application backend uses Node.js. The data is stored in Redis and MongoDB. Below you will find screenshots of the frontend of the e-commerce app:

    • Dashboard: Shows the list of products with search functionality

      redis microservices e-commerce app frontend products page

    • Shopping Cart: Add products to the cart, then check out using the "Buy Now" button redis microservices e-commerce app frontend shopping cart

    • Order history: Once an order is placed, the Orders link in the top navigation bar shows the order status and history

      redis microservices e-commerce app frontend order history page


    microservices-ecommerce

The e-commerce microservices application consists of a frontend, built using Next.js with TailwindCSS. The application backend uses Node.js. The data is stored in Redis and MongoDB/PostgreSQL using Prisma. Below you will find screenshots of the frontend of the e-commerce app:

    • Dashboard: Shows the list of products with search functionality

      redis microservices e-commerce app frontend products page

    • Shopping Cart: Add products to the cart, then check out using the "Buy Now" button redis microservices e-commerce app frontend shopping cart

    • Order history: Once an order is placed, the Orders link in the top navigation bar shows the order status and history

      redis microservices e-commerce app frontend order history page


    redis-enterprise

You can use Redis Enterprise as a multi-model primary database. Redis Enterprise is a fully managed, highly available, secure, and real-time data platform. It can store data in both RAM and Flash. It also supports Active-Active (multi-zone read and write replicas) on different cloud vendors, providing extremely high availability and scalability. Active-Active offers global scalability while maintaining local speed for database reads and writes.

    Redis Enterprise has many built-in modular capabilities, making it a unified, real-time data platform. Redis Enterprise is far more than a document database.

    • JSON: Persists JSON documents
    • Search: Indexes and searches JSON documents
    • Probabilistic data structures: Provides bloom filters and other probabilistic data structures
    • Time Series: Supports time series data structures
• Triggers and Functions: Syncs data to external databases via different patterns (write-behind/write-through) or executes custom logic.

    Use RedisInsight to view your Redis data or to play with raw Redis commands in the workbench.

    If you're interested in diving deeper, try Redis Enterprise today for free!


    How to Build an E-Commerce App Using Redis with the CQRS Pattern


    Profile picture for Prasan Kumar
    Author:
    Prasan Kumar, Technical Solutions Developer at Redis
    Profile picture for Will Johnston
    Author:
    Will Johnston, Developer Growth Manager at Redis

    GITHUB CODE

Below is the command to clone the source code for the application used in this tutorial:

    git clone --branch v4.2.0 https://github.com/redis-developer/redis-microservices-ecommerce-solutions

    What is command and query responsibility segregation (CQRS)?

    Command Query Responsibility Segregation (CQRS) is a critical pattern within a microservice architecture. It decouples reads (queries) and writes (commands), which permits read and write workloads to work independently.

Commands (write) focus on higher durability and consistency, while queries (read) focus on performance. This enables a microservice to write data to a slower, disk-based system of record, while pre-fetching and caching that data in a cache for real-time reads.

    The idea is simple: you separate commands such as "Order this product" (a write operation) from queries such as "Show me my order history" (a read operation). CQRS applications are often messaging-based and rely on eventual consistency.
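To make the split concrete, here is a minimal TypeScript illustration with hypothetical names; the tutorial's real command and query handlers appear later in this article:

// the command mutates state in the write-optimized system of record
interface CreateOrderCommand {
  userId: string;
  products: { productId: string; qty: number; productPrice: number }[];
}

// the query reads from the read-optimized store (Redis)
interface OrderHistoryQuery {
  userId: string;
}

async function handleCreateOrder(cmd: CreateOrderCommand): Promise<string> {
  // persist the order durably, emit events, return the new orderId
  return 'hypothetical-order-id';
}

async function handleOrderHistory(query: OrderHistoryQuery): Promise<unknown[]> {
  // serve pre-cached order documents from Redis, scaled independently of writes
  return [];
}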

    The sample data architecture that follows demonstrates how to use Redis with CQRS:

    CQRS architecture with Redis

    The architecture illustrated in the diagram uses the Change Data Capture pattern (noted as "Integrated CDC") to track the changed state on the command database and to replicate it to the query database (Redis). This is a common pattern used with CQRS.

    Implementing CDC requires:

    1. Taking the data snapshot from the system of record
2. Performing an ETL operation to load the data into the target cache database
    3. Setting up a mechanism to continuously stream the changes in the system of record to the cache
    tip

    While you can implement your own CDC mechanism with Redis using RedisGears, Redis Enterprise comes with its own integrated CDC mechanism to solve this problem for you.

    Why you might use CQRS

    To improve application performance, scale your read and write operations separately.

    Consider the following scenario: You have an e-commerce application that allows a customer to populate a shopping cart with products. The site has a "Buy Now" button to facilitate ordering those products. When first starting out, you might set up and populate a product database (perhaps a SQL database). Then you might write a backend API to handle the processes of creating an order, creating an invoice, processing payments, handling fulfillment, and updating the customer's order history… all in one go.

    This method of synchronous order processing seemed like a good idea. But you soon find out that your database slows down as you gain more customers and have a higher sales volume. In reality, most applications have significantly more reads than writes. You should scale those operations separately.

    You decide that you need to process orders quickly so the customer doesn't have to wait. Then, when you have time, you can create an invoice, process payment, handle fulfillment, etc.

    So you decide to separate each of these steps. Using a microservices approach with CQRS allows you to scale your reads and writes independently as well as aid in decoupling your microservices. With a CQRS model, a single service is responsible for handling an entire command from end to end. One service should not depend on another service in order to complete a command.

    Microservices CQRS architecture for an e-commerce application

    You eventually land on the following architecture:

    1. products service: handles querying products from the database and returning them to the frontend
    2. orders service: handles validating and creating orders
    3. order history service: handles querying a customer's order history
    4. payments service: handles processing orders for payment
    5. api gateway: unifies the services under a single endpoint
6. mongodb/postgresql: serves as the write-optimized database for storing orders, order history, products, etc.
    info

You don't need to use MongoDB or PostgreSQL as the write-optimized database in the demo application; you can use other Prisma-supported databases as well. This is just an example.

    Using CQRS in a microservices architecture

    Note that in the current architecture all the services use the same underlying database. Even though you’re technically separating reads and writes, you can't scale the write-optimized database independently. This is where Redis comes in. If you put Redis in front of your write-optimized database, you can use it for reads while writing to the write-optimized database. The benefit of Redis is that it’s fast for reads and writes, which is why it’s the best choice for caching and CQRS.

    info

    For the purposes of this tutorial, we’re not highlighting how communication is coordinated between our services, such as how new orders are processed for payment. That process uses Redis Streams, and is outlined in our interservice communication guide.

    tip

    When your e-commerce application eventually needs to scale across the globe, Redis Enterprise provides Active-Active geo-distribution for reads and writes at local latencies as well as availability of 99.999% uptime.

Let's look at some sample code that helps facilitate the CQRS pattern with Redis and a primary database (MongoDB/PostgreSQL).

    E-commerce application frontend using Next.js and Tailwind

The e-commerce microservices application consists of a frontend, built using Next.js with TailwindCSS. The application backend uses Node.js. The data is stored in Redis and MongoDB/PostgreSQL using Prisma. Below you will find screenshots of the frontend of the e-commerce app:

    • Dashboard: Shows the list of products with search functionality

      redis microservices e-commerce app frontend products page

    • Shopping Cart: Add products to the cart, then check out using the "Buy Now" button redis microservices e-commerce app frontend shopping cart

    • Order history: Once an order is placed, the Orders link in the top navigation bar shows the order status and history

      redis microservices e-commerce app frontend order history page

    GITHUB CODE

Below is the command to clone the source code for the application used in this tutorial:

    git clone --branch v4.2.0 https://github.com/redis-developer/redis-microservices-ecommerce-solutions

Building a CQRS microservices application with Redis and a primary database (MongoDB/PostgreSQL)

Let's look at the sample code for the order service and see the CreateOrder command (a write operation). Then we look at the order history service to see the ViewOrderHistory query (a read operation).

    Create order command API

    The code that follows shows an example API request and response to create an order.

    Create order request

    docs/api/create-order.md
// POST http://api-gateway/orders/createOrder
{
  "products": [
    {
      "productId": "11002",
      "qty": 1,
      "productPrice": 4950
    },
    {
      "productId": "11012",
      "qty": 2,
      "productPrice": 1195
    }
  ]
}

    Create order response

{
  "data": "d4075f43-c262-4027-ad25-7b1bc8c490b6", //orderId
  "error": null
}

    When you make a request, it goes through the API gateway to the orders service. Ultimately, it ends up calling a createOrder function which looks as follows:

    server/src/services/orders/src/service-impl.ts
const createOrder = async (
  order: IOrder,
  //...
) => {
  if (!order) {
    throw 'Order data is mandatory!';
  }

  const userId = order.userId || USERS.DEFAULT; // shortcut; a real app would use customer session data to fetch user details
  const orderId = uuidv4();

  order.orderId = orderId;
  order.orderStatusCode = ORDER_STATUS.CREATED;
  order.userId = userId;
  order.createdBy = userId;
  order.statusCode = DB_ROW_STATUS.ACTIVE;
  order.potentialFraud = false;

  order = await validateOrder(order);

  const products = await getProductDetails(order);
  addProductDataToOrders(order, products);

  await addOrderToRedis(order);

  await addOrderToPrismaDB(order);

  //...

  return orderId;
};
    info

For tutorial simplicity, we add data to both the primary database and Redis in the same service (double-write). As mentioned earlier, a common pattern is to have your services write to one database, and then separately use a CDC mechanism to update the other database. For example, you could write directly to Redis, then use RedisGears to handle synchronizing Redis and the primary database in the background. For the purposes of this tutorial, we don't outline exactly how you might handle synchronization, but instead focus on how the data is stored and accessed in Redis.

    tip

    If you're using Redis Enterprise, you can take advantage of the integrated CDC mechanism to avoid having to roll your own.

    Note that in the previous code block we call the addOrderToRedis function to store orders in Redis. We use Redis OM for Node.js to store the order entities in Redis. This is what that function looks like:

    server/src/services/orders/src/service-impl.ts
import { Schema, Repository } from 'redis-om';
import { getNodeRedisClient } from '../utils/redis/redis-wrapper';

// Redis OM schema for Order
const schema = new Schema('Order', {
  orderId: { type: 'string', indexed: true },

  orderStatusCode: { type: 'number', indexed: true },
  potentialFraud: { type: 'boolean', indexed: false },
  userId: { type: 'string', indexed: true },

  createdOn: { type: 'date', indexed: false },
  createdBy: { type: 'string', indexed: true },
  lastUpdatedOn: { type: 'date', indexed: false },
  lastUpdatedBy: { type: 'string', indexed: false },
  statusCode: { type: 'number', indexed: true },
});

// Redis OM repository for Order (to read, write, and remove orders)
const getOrderRepository = () => {
  const redisClient = getNodeRedisClient();
  const repository = new Repository(schema, redisClient);
  return repository;
};

// Redis indexes the data for search
const createRedisIndex = async () => {
  const repository = getOrderRepository();
  await repository.createIndex();
};

const addOrderToRedis = async (order: OrderWithIncludes) => {
  if (order) {
    const repository = getOrderRepository();
    // insert the Order into Redis
    await repository.save(order.orderId, order);
  }
};

    Sample Order view using RedisInsight

    sample order
    tip

    Download RedisInsight to view your Redis data or to play with raw Redis commands in the workbench. Learn more by reading the RedisInsight tutorial

    Order history API

    The code that follows shows an example API request and response to get a customer's order history.

    Order history request

    docs/api/view-order-history.md
    // GET http://api-gateway/orderHistory/viewOrderHistory

    Order history response

{
  "data": [
    {
      "orderId": "d4075f43-c262-4027-ad25-7b1bc8c490b6",
      "userId": "USR_22fcf2ee-465f-4341-89c2-c9d16b1f711b",
      "orderStatusCode": 4,
      "products": [
        {
          "productId": "11002",
          "qty": 1,
          "productPrice": 4950,
          "productData": {
            "productId": "11002",
            "price": 4950,
            "productDisplayName": "Puma Men Race Black Watch",
            "variantName": "Race 85",
            "brandName": "Puma",
            "ageGroup": "Adults-Men",
            "gender": "Men",
            "displayCategories": "Accessories",
            "masterCategory_typeName": "Accessories",
            "subCategory_typeName": "Watches",
            "styleImages_default_imageURL": "http://host.docker.internal:8080/images/11002.jpg",
            "productDescriptors_description_value": "<p>This watch from puma comes in a heavy duty design. The assymentric dial and chunky..."
          }
        },
        {
          "productId": "11012",
          "qty": 2,
          "productPrice": 1195,
          "productData": {
            "productId": "11012",
            "price": 1195,
            "productDisplayName": "Wrangler Women Frill Check Multi Tops",
            "variantName": "FRILL CHECK",
            "brandName": "Wrangler",
            "ageGroup": "Adults-Women",
            "gender": "Women",
            "displayCategories": "Sale and Clearance,Casual Wear",
            "masterCategory_typeName": "Apparel",
            "subCategory_typeName": "Topwear",
            "styleImages_default_imageURL": "http://host.docker.internal:8080/images/11012.jpg",
            "productDescriptors_description_value": "<p><strong>Composition</strong><br /> Navy blue, red, yellow and white checked top made of 100% cotton, with a jabot collar, buttoned ..."
          }
        }
      ],
      "createdBy": "USR_22fcf2ee-465f-4341-89c2-c9d16b1f711b",
      "lastUpdatedOn": "2023-07-13T14:11:49.997Z",
      "lastUpdatedBy": "USR_22fcf2ee-465f-4341-89c2-c9d16b1f711b"
    }
  ],
  "error": null
}

    When you make a request, it goes through the API gateway to the order history service. Ultimately, it ends up calling a viewOrderHistory function, which looks as follows:

    server/src/services/order-history/src/service-impl.ts
const viewOrderHistory = async (userId: string) => {
  const repository = OrderRepo.getRepository();
  let orders: Partial<IOrder>[] = [];
  const queryBuilder = repository
    .search()
    .where('createdBy')
    .eq(userId)
    .and('orderStatusCode')
    .gte(ORDER_STATUS.CREATED) // returns CREATED and PAYMENT_SUCCESS
    .and('statusCode')
    .eq(DB_ROW_STATUS.ACTIVE);

  console.log(queryBuilder.query);
  orders = <Partial<IOrder>[]>await queryBuilder.return.all();

  return orders;
};
    info

    Note that the order history service only needs to go to Redis for all orders. This is because we handle storage and synchronization between Redis and primary database within the orders service.

You might be used to using Redis as a cache and both storing and retrieving stringified JSON values, or perhaps hashed values. However, look closely at the code above. In it, we store orders as JSON documents, and then use Redis OM to search for the orders that belong to a specific user. Redis operates like a search engine here, with the ability to speed up queries and scale independently from the primary database (which in this case is MongoDB/PostgreSQL).

    Ready to use Redis with the CQRS pattern?

    Hopefully, this tutorial has helped you visualize how to use Redis with the CQRS pattern. It can help to reduce the load on your primary database while still allowing you to store and search JSON documents. For additional resources related to this topic, check out the links below:

    Additional resources

Microservices Communication with Redis Streams

payments-stream

• The orders service listens to the PAYMENTS_STREAM and updates the orderStatus and paymentId for orders in the database as each order's payment is fulfilled (i.e., it acts as the CONSUMER of the PAYMENTS_STREAM), as in the example update below:

{
  //order collection update
  "orderId": "01GTP3K2TZQQCQ0T2G43DSSMTD",
  "paymentId": "6403212956a976300afbaac1",
  "orderStatusCode": 3 //payment success
  //...
}
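A sketch of how the orders service might apply that update, assuming the Prisma client wrapper used elsewhere in this tutorial and a unique orderId column; updatePaymentStatus is a hypothetical name:

async function updatePaymentStatus(message: {
  orderId: string;
  paymentId: string;
  orderStatusCode: string; // stream fields arrive as strings
}) {
  const prisma = getPrismaClient(); // the tutorial's Prisma wrapper

  await prisma.order.update({
    where: { orderId: message.orderId },
    data: {
      paymentId: message.paymentId,
      orderStatusCode: Number(message.orderStatusCode), // 3 = payment success
    },
  });
}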

    E-commerce application frontend using Next.js and Tailwind

    The e-commerce microservices application consists of a frontend, built using Next.js with TailwindCSS. The application backend uses Node.js. The data is stored in Redis and MongoDB. Below you will find screenshots of the frontend of the e-commerce app:

    • Dashboard: Shows the list of products with search functionality

      redis microservices e-commerce app frontend products page

    • Shopping Cart: Add products to the cart, then check out using the "Buy Now" button redis microservices e-commerce app frontend shopping cart

    • Order history: Once an order is placed, the Orders link in the top navigation bar shows the order status and history

      redis microservices e-commerce app frontend order history page

    GITHUB CODE

Below is the command to clone the source code for the application used in this tutorial:

    git clone --branch v1.0.0 https://github.com/redis-developer/redis-microservices-ecommerce-solutions

    Building an interservice communication application with Redis

    We use Redis to broker the events sent between the orders service and the payments service.

    Producer 1 (orders service)

    Let's look at some of the code in the orders service to understand how it works:

    1. Orders are created.
    2. After order creation, the orders service appends minimal data to the ORDERS_STREAM to signal new order creation.
    server/src/services/orders/src/service-impl.ts
const addOrderIdToStream = async (
  orderId: string,
  orderAmount: number,
  userId: string,
) => {
  const nodeRedisClient = getNodeRedisClient();
  if (orderId && nodeRedisClient) {
    const streamKeyName = 'ORDERS_STREAM';
    const entry = {
      orderId: orderId,
      orderAmount: orderAmount.toFixed(2),
      userId: userId,
    };
    const id = '*'; // * = auto-generate entry ID
    // xAdd appends the entry to the specified stream
    await nodeRedisClient.xAdd(streamKeyName, id, entry);
  }
};
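To sanity-check what the producer wrote, you can read entries straight back from the stream. A small, hypothetical inspection snippet using the same node-redis client:

// Read back up to five entries from ORDERS_STREAM ('-' and '+' mean the
// minimum and maximum possible entry IDs).
const entries = await nodeRedisClient.xRange('ORDERS_STREAM', '-', '+', { COUNT: 5 });
for (const entry of entries) {
  console.log(entry.id, entry.message); // message is the field-value map added via xAdd
}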

    Consumer 1 (payments service)

    1. The payments service listens to the ORDERS_STREAM
    server/src/services/payments/src/service-impl.ts
    // Below is some code for how you would use Redis to listen for the stream events:

async function listenToStream({
  onMessage,
}: {
  onMessage: (message: any, messageId: string) => Promise<void>;
}) {
  // using node-redis
  const nodeRedisClient = getNodeRedisClient();
  const streamKeyName = 'ORDERS_STREAM'; // stream name
  const groupName = 'ORDERS_CON_GROUP'; // listening consumer group name (custom)
  const consumerName = 'PAYMENTS_CON'; // listening consumer name (custom)
  const readMaxCount = 100;

  // Create the consumer group (and the stream, via MKSTREAM) if the stream doesn't exist yet
  if (!(await nodeRedisClient.exists(streamKeyName))) {
    const idPosition = '0'; // 0 = start, $ = end, or any specific entry ID
    await nodeRedisClient.xGroupCreate(streamKeyName, groupName, idPosition, {
      MKSTREAM: true,
    });
  }

  // set up a loop to listen for stream events
  while (true) {
    // read a batch of messages from the stream
    const dataArr = await nodeRedisClient.xReadGroup(
      commandOptions({
        isolated: true,
      }),
      groupName,
      consumerName,
      [
        {
          // you can specify multiple streams in this array
          key: streamKeyName,
          id: '>', // next entry ID that no consumer in this group has read
        },
      ],
      {
        COUNT: readMaxCount, // read up to n entries at a time
        BLOCK: 0, // block indefinitely if there are no new entries
      },
    );

    for (let data of dataArr) {
      for (let messageItem of data.messages) {
        // process the message received (in our case, perform payment)
        await onMessage(messageItem.message, messageItem.id);

        // acknowledge individual messages after processing
        nodeRedisClient.xAck(streamKeyName, groupName, messageItem.id);
      }
    }
  }
}

// `listenToStream` listens for events and calls the `onMessage` callback to further handle them.
listenToStream({
  onMessage: processPaymentForNewOrders,
});

const processPaymentForNewOrders: IMessageHandler = async (
  message,
  messageId,
) => {
  /*
  message = {
    orderId: "",
    orderAmount: "",
    userId: "",
  }
  */
  // process payment for the new orderId and insert "payments" data into the database
};
    note

    There are a few important things to note here:

1. Make sure the consumer group doesn't already exist before you create it.
2. Use isolated: true to run the blocking version of XREADGROUP in isolated execution mode.
3. Acknowledge individual messages after you process them to remove them from the pending entries list and to avoid processing them more than once.
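If a consumer crashes after reading but before acknowledging, its entries remain in the group's pending entries list (PEL) rather than being lost. As a hedged sketch (stream and group names follow the code above), you could inspect that list like this:

// Inspect the consumer group's pending entries list. Entries delivered to a
// consumer but never acknowledged stay here and can later be claimed.
const summary = await nodeRedisClient.xPending('ORDERS_STREAM', 'ORDERS_CON_GROUP');
console.log(summary.pending); // count of delivered-but-unacknowledged entries
console.log(summary.firstId, summary.lastId); // ID range of pending entries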

    Producer 2 (payments service)

    1. The payments service appends minimal data to PAYMENTS_STREAM to signal that a payment has been fulfilled.
    server/src/services/payments/src/service-impl.ts
const addPaymentIdToStream = async (
  orderId: string,
  paymentId: string,
  orderStatus: number,
  userId: string,
) => {
  const nodeRedisClient = getNodeRedisClient();
  if (orderId && nodeRedisClient) {
    const streamKeyName = 'PAYMENTS_STREAM';
    const entry = {
      orderId: orderId,
      paymentId: paymentId,
      orderStatusCode: orderStatus.toString(),
      userId: userId,
    };
    const id = '*'; // * = auto-generate entry ID
    // xAdd appends the entry to the specified stream
    await nodeRedisClient.xAdd(streamKeyName, id, entry);
  }
};

    Consumer 2 (orders service)

    1. The orders service listens to the PAYMENTS_STREAM and updates the order when payments are fulfilled.
    server/src/services/orders/src/service-impl.ts
    //Below is some code for how you would use Redis to listen for the stream events:

async function listenToStream({
  onMessage,
}: {
  onMessage: (message: any, messageId: string) => Promise<void>;
}) {
  // using node-redis
  const nodeRedisClient = getNodeRedisClient();
  const streamKeyName = 'PAYMENTS_STREAM'; // stream name
  const groupName = 'PAYMENTS_CON_GROUP'; // listening consumer group name (custom)
  const consumerName = 'ORDERS_CON'; // listening consumer name (custom)
  const readMaxCount = 100;

  // Create the consumer group (and the stream, via MKSTREAM) if the stream doesn't exist yet
  if (!(await nodeRedisClient.exists(streamKeyName))) {
    const idPosition = '0'; // 0 = start, $ = end, or any specific entry ID
    await nodeRedisClient.xGroupCreate(streamKeyName, groupName, idPosition, {
      MKSTREAM: true,
    });
  }

  // set up a loop to listen for stream events
  while (true) {
    // read a batch of messages from the stream
    const dataArr = await nodeRedisClient.xReadGroup(
      commandOptions({
        isolated: true,
      }),
      groupName,
      consumerName,
      [
        {
          // you can specify multiple streams in this array
          key: streamKeyName,
          id: '>', // next entry ID that no consumer in this group has read
        },
      ],
      {
        COUNT: readMaxCount, // read up to n entries at a time
        BLOCK: 0, // block indefinitely if there are no new entries
      },
    );

    for (let data of dataArr) {
      for (let messageItem of data.messages) {
        // process the message received (in our case, updateOrderStatus)
        await onMessage(messageItem.message, messageItem.id);

        // acknowledge individual messages after processing
        nodeRedisClient.xAck(streamKeyName, groupName, messageItem.id);
      }
    }
  }
}

// `listenToStream` listens for events and calls the `onMessage` callback to further handle them.
listenToStream({
  onMessage: updateOrderStatus,
});

const updateOrderStatus: IMessageHandler = async (message, messageId) => {
  /*
  message = {
    orderId: "",
    paymentId: "",
    orderStatusCode: "",
    userId: "",
  }
  */
  // update orderStatus and paymentId in the database for the order whose payment was fulfilled
  // updateOrderStatusInRedis(orderId, paymentId, orderStatusCode, userId)
  // updateOrderStatusInMongoDB(orderId, paymentId, orderStatusCode, userId)
};
    tip

    It's a best practice to validate all incoming messages to make sure you can work with them.

For the purposes of our application, we update the order status in both Redis and the primary database from the same service. (For simplicity, we are not using any synchronization technique between the databases; instead, we focus on how the data is stored and accessed in Redis.) Another common pattern is to have your services write to one database, and then separately use a CDC mechanism to update the other. For example, you could write directly to Redis, then use Triggers and Functions to synchronize Redis and the primary database in the background.

    tip

    If you use Redis Enterprise, you will find that Redis Streams is available on the same multi-tenant data platform you already use for caching. Redis Enterprise also has high availability, message persistence, support for multiple clients, and resiliency with primary/secondary data replication… all built in.

    Ready to use Redis for streaming?

    That's all there is to it! You now know how to use Redis for streaming as both a producer and a consumer. Hopefully, you can draw some inspiration from this tutorial and apply it to your own event streaming application. For more on this topic, check out the additional resources below:

    Additional resources

    - + \ No newline at end of file diff --git a/howtos/solutions/mobile-banking/account-dashboard/index.html b/howtos/solutions/mobile-banking/account-dashboard/index.html index cdb279589e..035a838ff3 100644 --- a/howtos/solutions/mobile-banking/account-dashboard/index.html +++ b/howtos/solutions/mobile-banking/account-dashboard/index.html @@ -4,7 +4,7 @@ Mobile Banking Account Dashboard Using Redis | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Mobile Banking Account Dashboard Using Redis

    GITHUB CODE

Below is a command to clone the source code for the application used in this tutorial:

    git clone --branch v1.2.0 https://github.com/redis-developer/mobile-banking-solutions

    What is a mobile banking account dashboard?

An account dashboard is a page in a mobile banking app that instantly renders account highlights to users. A customer can click on any of the accounts on the dashboard to see real-time account details, such as the latest transactions, the remaining mortgage balance, checking and savings balances, and so on.

    An account dashboard makes a customer's finances easily visible in one place. It reduces financial complexity for the customer and fosters customer loyalty.

    The following diagram is an example data architecture for an account dashboard:

    dashboard

1. Banks store information in a number of separate databases that support individual banking products
2. Key customer account details (balances, recent transactions) across the bank's product portfolio are prefetched into Redis Enterprise using Redis Data Integration (RDI)
3. Redis Enterprise powers customers' account dashboards, enabling mobile banking users to view balances and other high-priority information immediately upon login

    Why you should use Redis for account dashboards in mobile banking

    • Resilience: Redis Enterprise provides resilience with 99.999% uptime and Active-Active Geo Distribution to prevent loss of critical user profile data

    • Scalability: Redis Enterprise provides < 1ms performance at incredibly high scale to ensure apps perform under peak loads

    • JSON Support: Provides the ability to create and store account information as JSON documents with the < 1ms speed of Redis

    • Querying and Indexing: Redis Enterprise can quickly identify and store data from multiple different databases and index data to make it readily searchable

    note

Redis Stack supports the JSON data type and allows you to index and query JSON documents, among other capabilities. So your Redis data is not limited to simple stringified key-value data.
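For illustration, the kind of index that makes JSON documents searchable can also be created directly with node-redis. A hedged sketch (the index name, field choices, and key prefix are assumptions, not this application's actual setup):

import { SchemaFieldTypes } from 'redis';

// Index JSON documents whose keys start with the given prefix.
await redis.ft.create(
  'idx:bankTransaction', // assumed index name
  {
    '$.description': { type: SchemaFieldTypes.TEXT, AS: 'description' },
    '$.fromAccountName': { type: SchemaFieldTypes.TEXT, AS: 'fromAccountName' },
    '$.transactionType': { type: SchemaFieldTypes.TAG, AS: 'transactionType' },
  },
  { ON: 'JSON', PREFIX: 'bankTransaction:' }, // assumed key prefix
);

In this application, Redis OM creates an equivalent index from its schema, so you would not normally run this by hand.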

    Building an account dashboard with Redis

    GITHUB CODE

Below is a command to clone the source code for the application used in this tutorial:

    git clone --branch v1.2.0 https://github.com/redis-developer/mobile-banking-solutions

Download the source code above and run the following command to start the demo application:

    docker compose up -d

Once the Docker containers are up and running, open http://localhost:8080/ in your browser to view the application.

    Data seeding

    This application leverages Redis core data structures, JSON, TimeSeries, Search and Query features. The data seeded is later used to show a searchable transaction overview with realtime updates as well as a personal finance management overview with realtime balance and biggest spenders updates.

On application startup in app/server.js, a cron job is scheduled to create random bank transactions at regular intervals and seed those transactions into Redis.

    app/server.js
// cron job to trigger createBankTransaction() at regular intervals

cron.schedule('*/10 * * * * *', async () => {
  const userName = process.env.REDIS_USERNAME;

  createBankTransaction(userName);

  //...
});
• The transaction generator creates a randomized banking debit or credit that is applied to a (default) starting user balance of $100,000.00
    • The transaction data is saved as a JSON document within Redis.
    • To capture balance over time, the balanceAfter value is recorded in a TimeSeries with the key balance_ts for every transaction.
    • To track biggest spenders, an associated fromAccountName member within the sorted set bigspenders is incremented by the transaction amount. Note that this amount can be positive or negative.
    app/transactions/transactionsGenerator.js
let balance = 100000.0;
const BALANCE_TS = 'balance_ts';
const SORTED_SET_KEY = 'bigspenders';

export const createBankTransaction = async () => {
  // create a random bank transaction
  let vendorsList = source.source; // app/transactions/transaction_sources.js
  const random = Math.floor(Math.random() * 9999999999);

  const vendor = vendorsList[random % vendorsList.length]; // random vendor from the list

  const amount = createTransactionAmount(vendor.fromAccountName, random);
  const transaction = {
    id: random * random,
    fromAccount: Math.floor((random / 2) * 3).toString(),
    fromAccountName: vendor.fromAccountName,
    toAccount: '1580783161',
    toAccountName: 'bob',
    amount: amount,
    description: vendor.description,
    transactionDate: new Date(),
    transactionType: vendor.type,
    balanceAfter: balance,
  };

  // Redis JSON feature
  const bankTransaction = await bankTransactionRepository.save(transaction);
  console.log('Created bankTransaction!');
  // ...
};

const createTransactionAmount = (vendor, random) => {
  let amount = createAmount(); // random amount
  balance += amount;
  balance = parseFloat(balance.toFixed(2));

  // Redis time series feature
  redis.ts.add(BALANCE_TS, '*', balance, { DUPLICATE_POLICY: 'first' });
  // Redis sorted set as a secondary index
  redis.zIncrBy(SORTED_SET_KEY, amount * -1, vendor);

  return amount;
};
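One detail worth calling out: the generator relies on the balance_ts time series accepting samples under a DUPLICATE_POLICY. If you were creating the series explicitly up front, a sketch might look like this (the application may instead rely on TS.ADD creating it implicitly):

import { TimeSeriesDuplicatePolicies } from '@redis/time-series';

// Create the series once, keeping the first sample when timestamps collide,
// matching the DUPLICATE_POLICY used by ts.add above.
await redis.ts.create(BALANCE_TS, {
  DUPLICATE_POLICY: TimeSeriesDuplicatePolicies.FIRST,
});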

    Sample bankTransaction data view using RedisInsight

    bank transaction data

    bank transaction json

    tip

    Download RedisInsight to view your Redis data or to play with raw Redis commands in the workbench. Learn more by reading the RedisInsight tutorial

    Balance over time

    Dashboard widget

    Chart

    API endpoint

Endpoint: /transaction/balance
Code location: /routers/transaction-router.js
Parameters: none
Return value: [{x: timestamp, y: value}, ...]

The balance endpoint leverages Time Series. It returns the range of values from the time series object balance_ts over the last five minutes. The resulting range is converted to an array of objects, with each object containing an x property holding the timestamp and a y property holding the associated value. This endpoint supplies the time series chart with coordinates to plot a visualization of the balance over time.

    app/routers/transaction-router.js
const BALANCE_TS = 'balance_ts';

/* fetch balance samples from the last five minutes */
transactionRouter.get('/balance', async (req, res) => {
  // time series range
  const balance = await redis.ts.range(
    BALANCE_TS,
    Date.now() - 1000 * 60 * 5, // from: five minutes ago
    Date.now(), // to: now
  );

  let balancePayload = balance.map((entry) => {
    return {
      x: entry.timestamp,
      y: entry.value,
    };
  });

  res.send(balancePayload);
});

    Biggest spenders

    Dashboard widget

    Chart

    API end point

Endpoint: /transaction/biggestspenders
Code location: /routers/transaction-router.js
Parameters: none
Return value: {labels: [...], series: [...]}

The biggest spenders endpoint leverages a sorted set as a secondary index. It retrieves all members of the sorted set bigspenders that have scores greater than zero. The top five or fewer are returned to provide the UI pie chart with data. The labels array contains the names of the biggest spenders, and the series array contains the numeric value associated with each member name.

    app/routers/transaction-router.js
const SORTED_SET_KEY = 'bigspenders';

/* fetch the top five biggest spenders */
transactionRouter.get('/biggestspenders', async (req, res) => {
  const range = await redis.zRangeByScoreWithScores(
    SORTED_SET_KEY,
    0,
    Infinity,
  );
  let series = [];
  let labels = [];

  range.slice(0, 5).forEach((spender) => {
    series.push(parseFloat(spender.score.toFixed(2)));
    labels.push(spender.value);
  });

  res.send({ series, labels });
});
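Note that ZRANGEBYSCORE returns members in ascending score order, so slice(0, 5) picks the smallest qualifying scores. Since the generator increments scores by amount * -1 (making debits positive), the biggest spenders carry the highest scores; if you wanted them first, a descending range is one option (a sketch, not the tutorial's code):

// Top five members by score, highest first (ZRANGE ... REV WITHSCORES).
const top = await redis.zRangeWithScores(SORTED_SET_KEY, 0, 4, { REV: true });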

    Search existing transactions

    Dashboard widget

    Search transactions

    API end point

Endpoint: /transaction/search
Code location: /routers/transaction-router.js
Query parameters: term
Return value: array of results matching term

The search endpoint leverages Search and Query. It receives a term query parameter from the UI. A Redis OM Node query on the description, fromAccountName, and transactionType fields runs and returns the matching results.

    app/routers/transaction-router.js
transactionRouter.get('/search', async (req, res) => {
  const term = req.query.term;

  let results;

  if (term.length >= 3) {
    results = await bankRepo
      .search()
      .where('description')
      .matches(term)
      .or('fromAccountName')
      .matches(term)
      .or('transactionType')
      .equals(term)
      .return.all({ pageSize: 1000 });
  }
  res.send(results);
});

    Get recent transactions

    Dashboard widget

    View recent transactions

    API end point

Endpoint: /transaction/transactions
Code location: /routers/transaction-router.js
Parameters: none
Return value: array of results

The transactions endpoint also leverages Search and Query. A Redis OM Node query returns the ten most recent transactions.

    app/routers/transaction-router.js
/* return the ten most recent transactions */
transactionRouter.get('/transactions', async (req, res) => {
  const transactions = await bankRepo
    .search()
    .sortBy('transactionDate', 'DESC')
    .return.all({ pageSize: 10 });

  res.send(transactions.slice(0, 10));
});
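sortBy('transactionDate', 'DESC') only works because the underlying field is indexed as sortable. A hypothetical fragment of the Redis OM schema that would enable it (the real schema is defined elsewhere in the app):

import { Schema } from 'redis-om';

const bankTransactionSchema = new Schema('bankTransaction', {
  description: { type: 'text' },
  fromAccountName: { type: 'text' },
  transactionType: { type: 'string' },
  transactionDate: { type: 'date', sortable: true }, // sortable enables sortBy
});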

Ready to use Redis in an account dashboard?

Hopefully, this tutorial has helped you visualize how to use Redis for an account dashboard, specifically in the context of mobile banking. For additional resources related to this topic, check out the links below:

    Additional resources

Mobile Banking Authentication and Session Storage Using Redis

browser cookie entry

Now, on every subsequent API request from the client, the connect-redis-stack library loads the session details from Redis into the req.session variable based on the client cookie (sessionId).
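For orientation, wiring express-session to Redis typically looks something like the sketch below. Treat the store class and option names as assumptions: check the connect-redis-stack documentation for the exact exports. Here, app is the Express application and redis is the connected node-redis client:

import session from 'express-session';
import { RedisStackStore } from 'connect-redis-stack'; // assumed export name

app.use(
  session({
    store: new RedisStackStore({
      client: redis, // the connected node-redis client
      prefix: 'session:', // assumed key prefix for session entries
      ttlInSeconds: 3600, // assumed session TTL
    }),
    secret: process.env.SESSION_SECRET ?? 'dev-only-secret', // placeholder secret
    resave: false,
    saveUninitialized: false,
  }),
);

With this in place, writes to req.session are persisted to Redis under the session key and reloaded on the next request.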

    Balance API (Session storage)

Consider the /transaction/balance API code below, which demonstrates session storage.

To update session data, we modify the req.session variable. Let's add more session data, such as the user's current balance.

    app/routers/transaction-router.js
/* fetch balance samples from the last five minutes: /transaction/balance */
transactionRouter.get('/balance', async (req, res) => {
  const balance = await redis.ts.range(
    BALANCE_TS,
    Date.now() - 1000 * 60 * 5, // from: five minutes ago
    Date.now(), // to: now
  );

  let balancePayload = balance.map((entry) => {
    return {
      x: entry.timestamp,
      y: entry.value,
    };
  });

  let session = req.session;
  if (session.userid && balancePayload.length) {
    // add the latest balance amount to the session (updating session data)
    session.currentBalanceAmount = balancePayload[balancePayload.length - 1];
  }

  res.send(balancePayload);
});
• Updated session entry in Redis with the currentBalanceAmount field ('x' denoting the timestamp and 'y' the balance amount at that timestamp)

  session update

    • Verify the latest balance amount in the Dashboard UI

      dashboard balance

    Ready to use Redis in session management?

    Hopefully, this tutorial has helped you visualize how to use Redis for better session management, specifically in the context of mobile banking. For additional resources related to this topic, check out the links below:

    Additional resources


    Available to Promise in Real-time Inventory Using Redis


    Profile picture for Prasan Kumar
    Author:
    Prasan Kumar, Technical Solutions Developer at Redis
    Profile picture for Will Johnston
    Author:
    Will Johnston, Developer Growth Manager at Redis

    GITHUB CODE

Below is a command to clone the source code for the application used in this tutorial:

    git clone https://github.com/redis-developer/redis-real-time-inventory-solutions

    What is available-to-promise (ATP)?

The major requirement in a retail inventory system is presenting an accurate, real-time view of inventory to shoppers and store associates, enabling buy-online-pickup-in-store (BOPIS) and optimizing fulfillment across multiple inventory locations.

Available to promise (ATP) is the projected amount of inventory left available to sell, not including allocated inventory. It allows businesses to control distribution to their customers and predict inventory. The ATP model helps retailers keep inventory costs down, such as ordering, carrying, and stock-out costs. ATP is helpful as long as consumer buying forecasts remain correct. Implemented effectively, ATP can mean the difference between sustained growth and an inventory that repeatedly runs out of customers' favorite products, missing sales opportunities and harming the customer experience.

    How to calculate available-to-promise

Calculating available-to-promise is a relatively simple undertaking. Use the following formula for an accurate breakdown of available-to-promise capabilities:

    Available-to-promise = QuantityOnHand + Supply - Demand

    This formula includes the following elements:

    • QuantityOnHand: the total number of products that are immediately available to a company
    • Supply: the total stock of a product available for sale
    • Demand: the amount of a specific product that consumers are willing to purchase
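As a quick illustrative calculation (numbers invented for the example): with 100 units on hand, 40 more inbound from a supplier, and 25 units already committed to orders, ATP = 100 + 40 - 25 = 115 units that can still be promised to customers.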

    Current challenges in real time inventory

    • Over and under-stocking: While adopting a multi-channel business model (online & in store), lack of inventory visibility results in over and under-stocking of inventory in different regions and stores.

    • Consumers seek convenience: The ability to search across regional store locations and pickup merchandise immediately rather than wait for shipping is a key differentiator for retailers.

    • Consumers seek speed: All retailers, even small or family-run, must compete against the customer experience of large online retailers like Alibaba, FlipKart, Shopee, and Amazon.

• High inventory costs: Retailers seek to lower inventory costs by eliminating missed sales from out-of-stock scenarios, which also leads to higher “inventory turnover ratios.”

• Brand value: Inaccurate store inventory counts lead to frustrated customers, lower sales, and ultimately a weaker brand.

    Why you should use Redis for available-to-promise

    • Increased inventory visibility: Redis Enterprise provides highly scalable, real-time inventory synchronization between stores providing views into what stock is Available-To-Promise. Customers want to buy from a retailer who can check stock across multiple locations and provide real-time views on what's available locally.

• Enhanced customer experience: Sub-millisecond latency means online customers can easily get real-time views of shopping carts, pricing, and in-stock availability. Redis Enterprise's built-in search engine delivers full-text and aggregated, faceted search of inventory in real time, scaling to instantly search inventories with millions of product types, helping customers fill their shopping carts faster and keeping them engaged and loyal.

    • Cost efficiency at scale: Redis Enterprise offers real-time, bi-directional consistency between stores and data integration capabilities with enterprise systems without the complexity and costs of managing message brokers, auditing, and reconciliation.

    Real time inventory with Redis

    atp

Using Redis, the system delivers real-time synchronization of inventory across stores, warehouses, and stock in transit. It provides retailers with the most accurate, timely data on inventory across their entire store network, and gives consumers a positive experience when searching for and locating inventory.

    Redis Data Integration (RDI) capabilities enable accurate real-time inventory management and system of record synchronization. Redis advanced inventory search and query capabilities provide accurate available inventory information to multichannel and omnichannel customers and store associates.

This solution increases inventory turnover ratios, resulting in lower inventory costs and higher revenue and profits. It also reduces the impact of customer searches on systems of record and inventory management systems (IMS).

    Customer proof points

    Building a real time inventory service with redis

    GITHUB CODE

Below is a command to clone the source code for the application used in this tutorial:

    git clone https://github.com/redis-developer/redis-real-time-inventory-solutions

Managing inventory for a SKU (stock keeping unit) involves activities such as the following (a sketch of the route wiring appears after the list):

1. RetrieveSKU: Fetch the current quantity of a product
2. UpdateSKU: Update the latest quantity of a product
3. IncrementSKU: Increment the quantity by a specific value (say, when more products are procured)
4. DecrementSKU: Decrement the quantity by a specific value (say, after order fulfillment of the product)
5. RetrieveManySKUs: Fetch the current quantity of multiple products (say, to verify products are in stock before payment)
6. DecrementManySKUs: Decrement the quantity of multiple products (say, after fulfilling an order with multiple products)
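Each of these activities is exposed through the API gateway and handled by the inventory service, as shown in the sections below. A hypothetical sketch of the route wiring (route paths match the requests below; the handler shape is an assumption):

// Example: map the retrieveSKU route onto the inventory service function.
router.get('/api/retrieveSKU', async (req, res) => {
  try {
    const product = await InventoryServiceCls.retrieveSKU(Number(req.query.sku));
    res.send({ data: product, error: null });
  } catch (err) {
    res.status(400).send({ data: null, error: err });
  }
});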

    RetrieveSKU

    The code that follows shows an example API request and response for retrieveSKU activity.

    retrieveSKU API Request

    GET http://localhost:3000/api/retrieveSKU?sku=1019688

    retrieveSKU API Response

{
  "data": {
    "sku": 1019688,
    "name": "5-Year Protection Plan - Geek Squad",
    "type": "BlackTie",
    "totalQuantity": 10
  },
  "error": null
}

    When you make a request, it goes through the API gateway to the inventory service. Ultimately, it ends up calling a retrieveSKU function which looks as follows:

    src/inventory-service.ts

static async retrieveSKU(_productId: number): Promise<IProduct> {
  /**
   * Get current Quantity of a Product.
   *
   * :param _productId: Product Id
   * :return: Product with Quantity
   */
  const repository = ProductRepo.getRepository();
  let retItem: IProduct = {};

  if (repository && _productId) {
    // fetch product by ID (using the Redis OM library)
    const product = <IProduct>await repository.fetch(_productId.toString());

    if (product) {
      retItem = {
        sku: product.sku,
        name: product.name,
        type: product.type,
        totalQuantity: product.totalQuantity,
      };
    } else {
      throw `Product with Id ${_productId} not found`;
    }
  } else {
    throw `Input params failed !`;
  }

  return retItem;
}

    UpdateSKU

    The code that follows shows an example API request and response for updateSKU activity.

    updateSKU API Request

POST http://localhost:3000/api/updateSKU
{
  "sku": 1019688,
  "quantity": 25
}

updateSKU API Response

{
  "data": {
    "sku": 1019688,
    "name": "5-Year Protection Plan - Geek Squad",
    "type": "BlackTie",
    "totalQuantity": 25 //updated value
  },
  "error": null
}

When you make a request, it goes through the API gateway to the inventory service. Ultimately, it ends up calling an updateSKU function which looks as follows:

    src/inventory-service.ts
static async updateSKU(_productId: number, _quantity: number): Promise<IProduct> {
  /**
   * Set Quantity of a Product.
   *
   * :param _productId: Product Id
   * :param _quantity: new quantity
   * :return: Product with Quantity
   */
  const repository = ProductRepo.getRepository();
  let retItem: IProduct = {};

  if (repository && _productId && _quantity >= 0) {
    // fetch product by ID (using the Redis OM library)
    const product = <IProduct>await repository.fetch(_productId.toString());

    if (product) {
      // update the product fields
      product.totalQuantity = _quantity;

      // save the modified product
      const savedItem = <IProduct>await repository.save(<RedisEntity>product);

      retItem = {
        sku: savedItem.sku,
        name: savedItem.name,
        type: savedItem.type,
        totalQuantity: savedItem.totalQuantity,
      };
    } else {
      throw `Product with Id ${_productId} not found`;
    }
  } else {
    throw `Input params failed !`;
  }

  return retItem;
}

    IncrementSKU

    The code that follows shows an example API request and response for incrementSKU activity.

    incrementSKU API Request

POST http://localhost:3000/api/incrementSKU
{
  "sku": 1019688,
  "quantity": 2
}

incrementSKU API Response

{
  "data": {
    "sku": 1019688,
    "name": "5-Year Protection Plan - Geek Squad",
    "type": "BlackTie",
    "totalQuantity": 12 //previous value 10
  },
  "error": null
}

    When you make a request, it goes through the API gateway to the inventory service. Ultimately, it ends up calling a incrementSKU function which looks as follows:

    src/inventory-service.ts
static async incrementSKU(_productId: number, _incrQuantity: number, _isDecrement: boolean, _isReturnProduct: boolean): Promise<IProduct> {
  /**
   * Increment quantity of a Product.
   *
   * :param _productId: Product Id
   * :param _incrQuantity: increment quantity
   * :return: Product with Quantity
   */
  const redisOmClient = getRedisOmClient();
  let retItem: IProduct = {};

  if (!_incrQuantity) {
    _incrQuantity = 1;
  }
  if (_isDecrement) {
    _incrQuantity = _incrQuantity * -1;
  }
  if (redisOmClient && _productId && _incrQuantity) {
    const updateKey = `${ProductRepo.PRODUCT_KEY_PREFIX}:${_productId}`;

    // increment a JSON number field by a specific (positive/negative) value
    await redisOmClient.redis?.json.numIncrBy(updateKey, '$.totalQuantity', _incrQuantity);

    if (_isReturnProduct) {
      retItem = await InventoryServiceCls.retrieveSKU(_productId);
    }
  } else {
    throw `Input params failed !`;
  }

  return retItem;
}

    DecrementSKU

    The code that follows shows an example API request and response for decrementSKU activity.

    decrementSKU API Request

POST http://localhost:3000/api/decrementSKU
{
  "sku": 1019688,
  "quantity": 4
}

decrementSKU API Response

{
  "data": {
    "sku": 1019688,
    "name": "5-Year Protection Plan - Geek Squad",
    "type": "BlackTie",
    "totalQuantity": 16 //previous value 20
  },
  "error": null
}

    When you make a request, it goes through the API gateway to the inventory service. Ultimately, it ends up calling a decrementSKU function which looks as follows:

    src/inventory-service.ts
static async decrementSKU(_productId: number, _decrQuantity: number): Promise<IProduct> {
  /**
   * Decrement quantity of a Product.
   *
   * :param _productId: Product Id
   * :param _decrQuantity: decrement quantity
   * :return: Product with Quantity
   */
  let retItem: IProduct = {};

  // validate that the product is in stock
  let isValid = await InventoryServiceCls.validateQuantityOnDecrementSKU(_productId, _decrQuantity);

  if (isValid) {
    const isDecrement = true; // increments with a negative value
    const isReturnProduct = true;
    retItem = await InventoryServiceCls.incrementSKU(_productId, _decrQuantity, isDecrement, isReturnProduct);
  }

  return retItem;
}

static async validateQuantityOnDecrementSKU(_productId: number, _decrQuantity?: number): Promise<boolean> {
  let isValid = false;

  if (!_decrQuantity) {
    _decrQuantity = 1;
  }

  if (_productId) {
    const product = await InventoryServiceCls.retrieveSKU(_productId);
    if (product && product.totalQuantity && product.totalQuantity > 0
      && (product.totalQuantity - _decrQuantity >= 0)) {
      isValid = true;
    } else {
      throw `For product with Id ${_productId}, available quantity(${product.totalQuantity}) is lesser than decrement quantity(${_decrQuantity})`;
    }
  }
  return isValid;
}
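Note that validateQuantityOnDecrementSKU and the subsequent incrementSKU run as two separate round trips, so a concurrent request could decrement the stock between the check and the write. One way to close that window, shown purely as a sketch (not the tutorial's implementation), is a Lua script, which Redis executes atomically. Key and field names follow the code above; everything else is illustrative:

// Atomically check the remaining quantity and decrement it in one server-side step.
// JSON.GET with a $ path returns a JSON array string, hence cjson.decode(...)[1].
const ATOMIC_DECREMENT = `
  local qty = cjson.decode(redis.call('JSON.GET', KEYS[1], '$.totalQuantity'))[1]
  local decr = tonumber(ARGV[1])
  if qty ~= nil and qty - decr >= 0 then
    redis.call('JSON.NUMINCRBY', KEYS[1], '$.totalQuantity', -decr)
    return 1
  end
  return 0
`;

const ok = await nodeRedisClient.eval(ATOMIC_DECREMENT, {
  keys: [`${ProductRepo.PRODUCT_KEY_PREFIX}:${_productId}`],
  arguments: [String(_decrQuantity)],
});
if (ok !== 1) {
  throw `Not enough stock for product ${_productId}`;
}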

    RetrieveManySKUs

    The code that follows shows an example API request and response for retrieveManySKUs activity.

    retrieveManySKUs API Request

POST http://localhost:3000/api/retrieveManySKUs
[
  { "sku": 1019688 },
  { "sku": 1003622 },
  { "sku": 1006702 }
]

retrieveManySKUs API Response

{
  "data": [
    {
      "sku": 1019688,
      "name": "5-Year Protection Plan - Geek Squad",
      "type": "BlackTie",
      "totalQuantity": 24
    },
    {
      "sku": 1003622,
      "name": "Aquarius - Fender Stratocaster 1,000-Piece Jigsaw Puzzle - Black/Red/White/Yellow/Green/Orange/Blue",
      "type": "HardGood",
      "totalQuantity": 10
    },
    {
      "sku": 1006702,
      "name": "Clash of the Titans [DVD] [2010]",
      "type": "Movie",
      "totalQuantity": 10
    }
  ],
  "error": null
}

    When you make a request, it goes through the API gateway to the inventory service. Ultimately, it ends up calling a retrieveManySKUs function which looks as follows:

    src/inventory-service.ts
static async retrieveManySKUs(_productWithIds: IProductBodyFilter[]): Promise<IProduct[]> {
  /**
   * Get current Quantity of specific Products.
   *
   * :param _productWithIds: Product list with Id
   * :return: Product list
   */
  const repository = ProductRepo.getRepository();
  let retItems: IProduct[] = [];

  if (repository && _productWithIds && _productWithIds.length) {
    // string ID array
    const idArr = _productWithIds.map((product) => {
      return product.sku?.toString() || '';
    });

    // fetch products by IDs (using the Redis OM library)
    const result = await repository.fetch(...idArr);

    let productsArr: IProduct[] = [];

    if (idArr.length == 1) {
      productsArr = [<IProduct>result];
    } else {
      productsArr = <IProduct[]>result;
    }

    if (productsArr && productsArr.length) {
      retItems = productsArr.map((product) => {
        return {
          sku: product.sku,
          name: product.name,
          type: product.type,
          totalQuantity: product.totalQuantity,
        };
      });
    } else {
      throw `No products found !`;
    }
  } else {
    throw `Input params failed !`;
  }

  return retItems;
}

    DecrementManySKUs

    The code that follows shows an example API request and response for decrementManySKUs activity.

    decrementManySKUs API Request

POST http://localhost:3000/api/decrementManySKUs
[
  { "sku": 1019688, "quantity": 4 },
  { "sku": 1003622, "quantity": 2 },
  { "sku": 1006702, "quantity": 2 }
]

decrementManySKUs API Response

{
  "data": [
    {
      "sku": 1019688,
      "name": "5-Year Protection Plan - Geek Squad",
      "type": "BlackTie",
      "totalQuantity": 28 //previous value 32
    },
    {
      "sku": 1003622,
      "name": "Aquarius - Fender Stratocaster 1,000-Piece Jigsaw Puzzle - Black/Red/White/Yellow/Green/Orange/Blue",
      "type": "HardGood",
      "totalQuantity": 8 //previous value 10
    },
    {
      "sku": 1006702,
      "name": "Clash of the Titans [DVD] [2010]",
      "type": "Movie",
      "totalQuantity": 8 //previous value 10
    }
  ],
  "error": null
}

    When you make a request, it goes through the API gateway to the inventory service. Ultimately, it ends up calling a decrementManySKUs function which looks as follows:

    src/inventory-service.ts
static async decrementManySKUs(_productsFilter: IProductBodyFilter[]): Promise<IProduct[]> {
  /**
   * Decrement the quantity of specific products.
   *
   * :param _productsFilter: Product list with Id and quantity
   * :return: Product list
   */
  let retItems: IProduct[] = [];

  if (_productsFilter && _productsFilter.length) {
    // validation only: check that every product is in stock
    const promArr: Promise<boolean>[] = [];
    for (let p of _productsFilter) {
      if (p.sku) {
        const promObj = InventoryServiceCls.validateQuantityOnDecrementSKU(p.sku, p.quantity);
        promArr.push(promObj);
      }
    }
    await Promise.all(promArr);

    // decrement only
    const promArr2: Promise<IProduct>[] = [];
    for (let p of _productsFilter) {
      if (p.sku && p.quantity) {
        const isDecrement = true; // increments with a negative value
        const isReturnProduct = false;
        const promObj2 = InventoryServiceCls.incrementSKU(p.sku, p.quantity, isDecrement, isReturnProduct);
        promArr2.push(promObj2);
      }
    }
    await Promise.all(promArr2);

    // retrieve the updated products
    retItems = await InventoryServiceCls.retrieveManySKUs(_productsFilter);
  } else {
    throw 'Input params failed!';
  }

  return retItems;
}
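
The incrementSKU helper isn't shown on this page. A minimal sketch of what it might do, assuming the Redis OM product keys follow a Product:{sku} naming convention (the key prefix and signature details here are illustrative, not the repository's exact code), is to apply the change atomically with Redis JSON's JSON.NUMINCRBY:

static async incrementSKU(
  _sku: number,
  _quantity: number,
  _isDecrement: boolean,
  _isReturnProduct: boolean,
): Promise<IProduct | null> {
  const nodeRedisClient = getNodeRedisClient();
  if (!nodeRedisClient) {
    throw 'Redis client not available!';
  }

  // a negative increment decrements the stock atomically on the Redis server
  const delta = _isDecrement ? -_quantity : _quantity;
  await nodeRedisClient.json.numIncrBy(`Product:${_sku}`, '$.totalQuantity', delta); // hypothetical key prefix

  if (_isReturnProduct) {
    const [product] = await InventoryServiceCls.retrieveManySKUs([{ sku: _sku }]);
    return product;
  }
  return null;
}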

Ready to use Redis in a real-time inventory system?

Hopefully, this tutorial has helped you visualize how to use Redis in a real-time inventory system for product availability across different store locations. For additional resources related to this topic, check out the links below:

    Additional resources


    Real-time Local Inventory Search Using Redis


    Profile picture for Prasan Kumar
    Author:
    Prasan Kumar, Technical Solutions Developer at Redis
    Profile picture for Will Johnston
    Author:
    Will Johnston, Developer Growth Manager at Redis

    GITHUB CODE

Below is the command to clone the source code for the application used in this tutorial:

    git clone https://github.com/redis-developer/redis-real-time-inventory-solutions

Real-time local inventory search applies advanced product search across a group of stores or warehouses in a region or geographic area, letting a retailer enhance the customer experience with a localized view of inventory while fulfilling orders from the closest store possible.

Geospatial search of merchandise local to the consumer helps sell stock faster, lowers inventory levels, and thus increases the inventory turnover ratio. Consumers locate a product online, place the order in their browser or on their mobile device, and pick it up at the nearest store location. This is called "buy online, pick up in store" (BOPIS).

Current challenges in real-time inventory

• Over- and under-stocking: When adopting a multi-channel business model (online and in-store), a lack of inventory visibility results in over- and under-stocking of inventory across regions and stores.

    • Consumers seek convenience: The ability to search across regional store locations and pickup merchandise immediately rather than wait for shipping is a key differentiator for retailers.

    • Consumers seek speed: All retailers, even small or family-run, must compete against the customer experience of large online retailers like Alibaba, FlipKart, Shopee, and Amazon.

• High inventory costs: Retailers seek to lower inventory costs by eliminating missed sales from out-of-stock scenarios, which also leads to higher “inventory turnover ratios.”

• Brand value: Inaccurate store inventory counts lead to frustrated customers, lower sales, and lasting damage to the brand.

Redis helps retailers meet these challenges:

• Accurate location/regional inventory search: Redis Enterprise geospatial search capabilities enable retailers to provide local inventories by store location across geographies and regions based on a consumer's location. This enables a real-time view of store inventory and a seamless BOPIS shopping experience.

    • Consistent and accurate inventory view across multichannel and omnichannel experiences: Accurate inventory information no matter what channel the shopper is using, in-store, kiosk, online, or mobile. Redis Enterprise provides a single source of truth for inventory information across all channels.

    • Real-time search performance at scale: Redis Enterprise real-time search and query engine allows retailers to provide instant application and inventory search responses and scale performance effortlessly during peak periods.

    Real-time local inventory search with Redis

    local-search

    Redis provides geospatial search capabilities across a group of stores or warehouses in a region or geographic area allowing a retailer to quickly show the available inventory local to the customer.

Redis Enterprise processes event streams, keeping store inventories up to date in real time. This enhances the customer experience with localized, accurate search of inventory while fulfilling orders from the nearest and fewest stores possible.

    This solution lowers days sales of inventory (DSI), selling inventory faster and carrying less inventory for increased revenue generation and profits over a shorter time period.

It also reduces fulfillment costs to homes and local stores, enhancing a retailer's ability to fulfill orders with the lowest delivery and shipping costs.

    Customer proof points

Building real-time local inventory search with Redis

    GITHUB CODE

Below is the command to clone the source code for the application used in this tutorial:

    git clone https://github.com/redis-developer/redis-real-time-inventory-solutions

    Setting up the data

Once the application source code is downloaded, run the following commands to populate data in Redis:

    # install packages
    npm install

    # Seed data to Redis
    npm run seed

    The demo uses two collections:

• Product collection: Stores product details like productId, name, price, image, and other product data.
    tip

Download RedisInsight to view your Redis data or to play with raw Redis commands in the workbench. Learn more about RedisInsight in the tutorials.

    • StoresInventory collection: Stores product quantity available at different local stores.

For demo purposes, we use the regions below in New York State (US) as store locations. Products are mapped to these stores with a storeId and quantity.

Regions in New York State

    inventory data
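
For orientation, here is a minimal sketch of what the two Redis OM (redis-om-node) schemas behind these collections might look like; the field lists are inferred from the API responses below, so the repository's actual definitions may differ:

import { Schema } from 'redis-om';

// Product catalog: one JSON document per SKU (fields inferred from the responses)
const productSchema = new Schema('Product', {
  sku: { type: 'number' },
  name: { type: 'text' },
  type: { type: 'string' },
  totalQuantity: { type: 'number' },
});

// Per-store stock: storeLocation is indexed as a geo point for radius queries
const storesInventorySchema = new Schema('StoresInventory', {
  storeId: { type: 'string' },
  storeLocation: { type: 'point' },
  sku: { type: 'number' },
  quantity: { type: 'number' },
});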

    Let's build the following APIs to demonstrate geospatial search using Redis:

• InventorySearch API: Search for products in local stores within a search radius.
• InventorySearchWithDistance API: Search for a product in local stores within a search radius, and sort the results by distance from the current user location to each store.

    InventorySearch API

    The code that follows shows an example API request and response for the inventorySearch API:

    inventorySearch API Request

    POST http://localhost:3000/api/inventorySearch
    {
    "sku":1019688,
    "searchRadiusInKm":500,
    "userLocation": {
    "latitude": 42.880230,
    "longitude": -78.878738
    }
    }

    inventorySearch API Response

    {
    "data": [
    {
    "storeId": "02_NY_ROCHESTER",
    "storeLocation": {
    "longitude": -77.608849,
    "latitude": 43.156578
    },
    "sku": 1019688,
    "quantity": 38
    },
    {
    "storeId": "05_NY_WATERTOWN",
    "storeLocation": {
    "longitude": -75.910759,
    "latitude": 43.974785
    },
    "sku": 1019688,
    "quantity": 31
    },
    {
    "storeId": "10_NY_POUGHKEEPSIE",
    "storeLocation": {
    "longitude": -73.923912,
    "latitude": 41.70829
    },
    "sku": 1019688,
    "quantity": 45
    }
    ],
    "error": null
    }

    When you make a request, it goes through the API gateway to the inventory service. Ultimately, it ends up calling an inventorySearch function which looks as follows:

    src/inventory-service.ts
/**
 * Search for a product in stores within the search radius.
 *
 * :param _inventoryFilter: Product Id (sku), searchRadiusInKm and current userLocation
 * :return: Inventory product list
 */
static async inventorySearch(_inventoryFilter: IInventoryBodyFilter): Promise<IStoresInventory[]> {
  const nodeRedisClient = getNodeRedisClient();

  const repository = StoresInventoryRepo.getRepository();
  let retItems: IStoresInventory[] = [];

  if (nodeRedisClient && repository && _inventoryFilter?.sku
    && _inventoryFilter?.userLocation?.latitude
    && _inventoryFilter?.userLocation?.longitude) {

    const lat = _inventoryFilter.userLocation.latitude;
    const long = _inventoryFilter.userLocation.longitude;
    const radiusInKm = _inventoryFilter.searchRadiusInKm || 1000;

    const queryBuilder = repository.search()
      .where('sku')
      .eq(_inventoryFilter.sku)
      .and('quantity')
      .gt(0)
      .and('storeLocation')
      .inRadius((circle) => {
        return circle
          .latitude(lat)
          .longitude(long)
          .radius(radiusInKm)
          .kilometers;
      });

    console.log(queryBuilder.query);
    /* Sample queryBuilder query
       ( ( (@sku:[1019688 1019688]) (@quantity:[(0 +inf]) ) (@storeLocation:[-78.878738 42.88023 500 km]) )
    */

    retItems = <IStoresInventory[]>await queryBuilder.return.all();

    /* Sample command to run the query directly in the CLI
       FT.SEARCH StoresInventory:index '( ( (@sku:[1019688 1019688]) (@quantity:[(0 +inf]) ) (@storeLocation:[-78.878738 42.88023 500 km]) )'
    */

    if (!retItems.length) {
      throw `Product not found within ${radiusInKm}km range!`;
    }
  } else {
    throw 'Input params failed!';
  }
  return retItems;
}
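
How a request reaches this function depends on the gateway code, which isn't shown here. As a minimal sketch, an Express route could forward the request body to the service and wrap the result in the { data, error } envelope seen in the sample responses (the route wiring below is illustrative):

import express from 'express';
// InventoryServiceCls is assumed imported from src/inventory-service

const router = express.Router();

router.post('/api/inventorySearch', async (req, res) => {
  try {
    const data = await InventoryServiceCls.inventorySearch(req.body);
    res.json({ data, error: null });
  } catch (error) {
    // the service throws plain strings, so stringify defensively
    res.status(400).json({ data: null, error: String(error) });
  }
});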

    InventorySearchWithDistance API

The code that follows shows an example API request and response for the inventorySearchWithDistance API:

    inventorySearchWithDistance API Request

    POST http://localhost:3000/api/inventorySearchWithDistance
    {
    "sku": 1019688,
    "searchRadiusInKm": 500,
    "userLocation": {
    "latitude": 42.88023,
    "longitude": -78.878738
    }
    }

    inventorySearchWithDistance API Response

    {
    "data": [
    {
    "storeId": "02_NY_ROCHESTER",
    "storeLocation": {
    "longitude": -77.608849,
    "latitude": 43.156578
    },
    "sku": "1019688",
    "quantity": "38",
    "distInKm": "107.74513"
    },
    {
    "storeId": "05_NY_WATERTOWN",
    "storeLocation": {
    "longitude": -75.910759,
    "latitude": 43.974785
    },
    "sku": "1019688",
    "quantity": "31",
    "distInKm": "268.86249"
    },
    {
    "storeId": "10_NY_POUGHKEEPSIE",
    "storeLocation": {
    "longitude": -73.923912,
    "latitude": 41.70829
    },
    "sku": "1019688",
    "quantity": "45",
    "distInKm": "427.90787"
    }
    ],
    "error": null
    }

    When you make a request, it goes through the API gateway to the inventory service. Ultimately, it ends up calling an inventorySearchWithDistance function which looks as follows:

    src/inventory-service.ts
/**
 * Search for a product in stores within the search radius, and sort results
 * by distance from the current user location to the store.
 *
 * :param _inventoryFilter: Product Id (sku), searchRadiusInKm and current userLocation
 * :return: Inventory product list
 */
static async inventorySearchWithDistance(_inventoryFilter: IInventoryBodyFilter): Promise<IStoresInventory[]> {
  const nodeRedisClient = getNodeRedisClient();

  const repository = StoresInventoryRepo.getRepository();
  let retItems: IStoresInventory[] = [];

  if (nodeRedisClient && repository && _inventoryFilter?.sku
    && _inventoryFilter?.userLocation?.latitude
    && _inventoryFilter?.userLocation?.longitude) {

    const lat = _inventoryFilter.userLocation.latitude;
    const long = _inventoryFilter.userLocation.longitude;
    const radiusInKm = _inventoryFilter.searchRadiusInKm || 1000;

    const queryBuilder = repository.search()
      .where('sku')
      .eq(_inventoryFilter.sku)
      .and('quantity')
      .gt(0)
      .and('storeLocation')
      .inRadius((circle) => {
        return circle
          .latitude(lat)
          .longitude(long)
          .radius(radiusInKm)
          .kilometers;
      });

    console.log(queryBuilder.query);
    /* Sample queryBuilder query
       ( ( (@sku:[1019688 1019688]) (@quantity:[(0 +inf]) ) (@storeLocation:[-78.878738 42.88023 500 km]) )
    */

    const indexName = `${StoresInventoryRepo.STORES_INVENTORY_KEY_PREFIX}:index`;
    const aggregator = await nodeRedisClient.ft.aggregate(
      indexName,
      queryBuilder.query,
      {
        LOAD: ['@storeId', '@storeLocation', '@sku', '@quantity'],
        STEPS: [{
          type: AggregateSteps.APPLY,
          expression: `geodistance(@storeLocation, ${long}, ${lat})/1000`, // metres -> km
          AS: 'distInKm',
        }, {
          type: AggregateSteps.SORTBY,
          BY: '@distInKm',
        }],
      });

    /* Sample command to run the query directly in the CLI
       FT.AGGREGATE StoresInventory:index '( ( (@sku:[1019688 1019688]) (@quantity:[(0 +inf]) ) (@storeLocation:[-78.878738 42.88023 500 km]) )' LOAD 4 @storeId @storeLocation @sku @quantity APPLY "geodistance(@storeLocation,-78.878738,42.88023)/1000" AS distInKm SORTBY 1 @distInKm
    */

    retItems = <IStoresInventory[]>aggregator.results;

    if (!retItems.length) {
      throw `Product not found within ${radiusInKm}km range!`;
    } else {
      // the aggregation returns storeLocation as a "long,lat" string; convert it back to an object
      retItems = retItems.map((item) => {
        if (typeof item.storeLocation == 'string') {
          const location = item.storeLocation.split(',');
          item.storeLocation = {
            longitude: Number(location[0]),
            latitude: Number(location[1]),
          };
        }
        return item;
      });
    }
  } else {
    throw 'Input params failed!';
  }
  return retItems;
}

Hopefully, this tutorial has helped you visualize how to use Redis for real-time local inventory search across different regional stores. For additional resources related to this topic, check out the links below:

    Additional resources

How to Perform Vector Similarity Search Using Redis in NodeJS

...in a given vector space. Higher values indicate higher similarity. However, the raw values can be large for long vectors; hence, normalization is recommended for better interpretation. If the vectors are normalized, their dot product will be 1 if they are identical and 0 if they are orthogonal (uncorrelated).

    Considering our product 1 and product 2, let's compute the Inner Product across all features.

    sample

    tip

    Vectors can also be stored in databases in binary formats to save space. In practical applications, it's crucial to strike a balance between the dimensionality of the vectors (which impacts storage and computational costs) and the quality or granularity of the information they capture.

    Further reading


    The Home of
    Redis Developers

    Create

    Create a new database using cloud, Docker or from source

    Create a database
    Read More

    Develop

    Develop your application using your favorite language

    Code your application
    Read More

    Explore

Insert, update, and explore your database using RedisInsight

    Explore your data
    Read More

    Operate

    Provision Redis and accelerate app deployment using DevOps

    Operate your database
    Read More


    Redis Launchpad

    Resources
    & community

    The latest from your favorite community to support your Redis journey

    Redis Pods
    Podcast

    How Redis scales Groww’s investing platform to empower 10 Million+ customers

    Read More

    Learn and Earn with Redis!

    Complete this short assessment for a chance to earn a $25 Amazon gift card! If you need help, all of the answers can be found in this e-book.

    To receive your gift card, you must be a legal resident of any of the 50 United States and D.C., Canada, Germany, Ireland, France, and the United Kingdom;

    Probabilistic Data Structures

Redis Stack provides additional probabilistic data structures that solve computer science problems within a constant memory space, with extremely fast processing and a low error rate. It supports scalable Bloom and Cuckoo filters to determine (with a specified degree of certainty) whether an item is present in or absent from a collection.

The four probabilistic data types are:

• Bloom filter: A probabilistic data structure that can test for presence. A Bloom filter is designed to tell you, rapidly and memory-efficiently, whether an element is present in a set. Bloom filters typically exhibit better performance and scalability when inserting items (so if you're often adding items to your dataset, then Bloom may be ideal).
• Cuckoo filter: An alternative to Bloom filters, Cuckoo filters come with additional support for deleting elements from a set (see the example after this list). These filters are quicker on check operations.
• Count-min sketch: A count-min sketch is generally used to determine the frequency of events in a stream. You can query the count-min sketch to get an estimate of the frequency of any given event.
• Top-K: The Top-K probabilistic data structure approximates frequencies for the top k items. With Top-K, you’ll be notified in real time whenever elements enter into or are expelled from your Top-K list. When an added element enters the list, the element it displaced will be returned.
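
To illustrate the deletion support mentioned above, here is a short sketch with the CF.* commands (the key name returned_items is illustrative):

>> CF.ADD returned_items 10.94.214.120
(integer) 1
>> CF.EXISTS returned_items 10.94.214.120
(integer) 1
>> CF.DEL returned_items 10.94.214.120
(integer) 1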

    Step 1. Register and subscribe

    Follow this link to register and subscribe to Redis Enterprise Cloud

    Redisbloom

    Step 2. Create a database with Redis Stack

    Redisbloom

    Step 3. Connect to a database

    Follow this link to know how to connect to a database

    Step 4. Getting Started with Probabilistic Data Structures

    In the next steps you will use some basic commands. You can run them from the Redis command-line interface (redis-cli) or use the CLI available in RedisInsight. (See part 2 of this tutorial to learn more about using the RedisInsight CLI.) To interact with Redis, you use the BF.ADD and BF.EXISTS commands.

    Let’s go ahead and test drive some probabilistic-specific operations. We will create a basic dataset based on unique visitors’ IP addresses, and you will see how to:

    • Create a Bloom filter
    • Determine whether or not an item exists in the Bloom filter
    • Add one or more items to the Bloom filter
    • Determine whether or not a unique visitor’s IP address exists

    Let’s walk through the process step-by-step:

    Create a Bloom filter

    Use the BF.ADD command to add a unique visitor IP address to the Bloom filter as shown here:

    >> BF.ADD unique_visitors 10.94.214.120
    (integer) 1
    (1.75s)

    Determine whether or not an item exists

    Use the BF.EXISTS command to determine whether or not an item may exist in the Bloom filter:

    >> BF.EXISTS unique_visitors 10.94.214.120
    (integer) 1
    >> BF.EXISTS unique_visitors 10.94.214.121
    (integer) 0
    (1.46s)

In the above example, the first command returns “1”, indicating that the item may exist, whereas the second command returns "0", indicating that the item certainly does not exist.

    Add one or more items to the Bloom filter

    Use the BF.MADD command to add one or more items to the Bloom filter, creating the filter if it does not yet exist. This command operates identically to BF.ADD, except it allows multiple inputs and returns multiple values:

    >> BF.MADD unique_visitors 10.94.214.100 10.94.214.200 10.94.214.210 10.94.214.212
    1) (integer) 1
    2) (integer) 1
    3) (integer) 1
    4) (integer) 1

As shown above, the BF.MADD command allows you to add one or more visitors’ IP addresses to the Bloom filter.

    Determine whether or not a unique visitor’s IP address exists

    Use BF.MEXISTS to determine if one or more items may exist in the filter or not:

    >> BF.MEXISTS unique_visitors 10.94.214.200 10.94.214.212
    1) (integer) 1
    2) (integer) 1
     >> BF.MEXISTS unique_visitors 10.94.214.200 10.94.214.213
    1) (integer) 1
    2) (integer) 0

In the above example, the first command returns “1” for both visitors’ IP addresses, indicating that these items may exist. The second command returns "0" for one of the IP addresses, indicating that the item certainly does not exist.

    Next Steps

    • Learn more about Probabilistic data in the Quick Start tutorial.
Redis Search

    Before running queries on our new index, though, let’s take a closer look at the elements of the FT.CREATE command:

    • idx:movies: the name of the index, which you will use when doing queries
    • ON hash: the type of structure to be indexed. (Note that Redis Search 2.0 supports only the Hash structure, but this parameter will allow Redis Search to index other structures in the future.)
    • PREFIX 1 “movies:”: the prefix of the keys that should be indexed. This is a list, so since we want to index only movies:* keys the number is 1. If you want to index movies and TV shows with the same fields, you could use: PREFIX 2 “movies:” “tv_show:”
    • SCHEMA …: defines the schema, the fields, and their type to index. As you can see in the command, we are using TEXT, NUMERIC, and TAG, as well as SORTABLE parameters.

The Redis Search 2.0 engine will scan the database using the PREFIX values and update the index based on the schema definition. This makes it easy to add an index to an existing application that uses Hashes; there’s no need to change your code.
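
The FT.CREATE call itself was clipped from this page. Based on the elements just described, it would have looked roughly like the following; treat this as a reconstruction rather than a verbatim copy of the original command:

> FT.CREATE idx:movies ON hash PREFIX 1 "movies:" SCHEMA title TEXT SORTABLE release_year NUMERIC SORTABLE rating NUMERIC SORTABLE genre TAG SORTABLE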

    Search the movies in the Redis Search index

You can now use the FT.SEARCH command to search your database; for example, to list all movies sorted by release year:

    >  FT.SEARCH idx:movies * SORTBY release_year ASC RETURN 2 title release_year
    1) (integer) 2
    2) "movies:1003"
    3) 1) "release_year"
    2) "1972"
    3) "title"
    4) "The Godfather"
    4) "movies:1002"
    5) 1) "release_year"
    2) "1980"
    3) "title"
    4) "Star Wars: Episode V - The Empire Strikes Back"

You can also search for “action” movies that contain “star” in the index (in our sample index, the term “star” occurs only in the title):

    >  FT.SEARCH idx:movies "star @genre:{action}" RETURN 2 title release_year
    1) (integer) 1
    2) "movies:1002"
    3) 1) "title"
    2) "Star Wars: Episode V - The Empire Strikes Back"
    3) "release_year"
    4) "1980"

The FT.SEARCH command is the base command for searching your database. It has many options and is associated with a powerful, rich query syntax that you can find in the documentation.

    tip

    You can also use the index to do data aggregation using the FT.AGGREGATE command.
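
For example, here is a sketch of an aggregation that counts the movies in each genre on the same index (the output depends on your data):

> FT.AGGREGATE idx:movies "*" GROUPBY 1 @genre REDUCE COUNT 0 AS nb_of_movies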

    Next Steps


    RedisGraph

    End-of-Life Notice

    Redis is phasing out RedisGraph. This blog post explains the motivation behind this decision and the implications for existing Redis customers and community members.

    End of support is scheduled for January 31, 2025.

    Beginning with Redis Stack 7.2.x-y, Redis Stack will no longer include graph capabilities (RedisGraph).

    RedisGraph is a Redis module that enables enterprises to process any kind of connected data much faster than with traditional relational or existing graph databases. RedisGraph implements a unique data storage and processing solution (with sparse-adjacency matrices and GraphBLAS) to deliver the fastest and most efficient way to store, manage, and process connected data in graphs. With RedisGraph, you can process complex transactions 10 - 600 times faster than with traditional graph solutions while using 50 - 60% less memory resources than other graph databases!

    Step 1. Register and subscribe

    Follow this link to register and subscribe to Redis Enterprise Cloud

    RedisGraph

    Step 2. Create a database with RedisGraph Module

    RedisGraph

    Step 3. Connect to a database

    Follow this link to know how to connect to a database

    Step 4. Getting Started with RedisGraph

In the following steps, we will use some basic RedisGraph commands to insert data into a graph and then query the graph. You can run them from the Redis command-line interface (redis-cli) or use the CLI available in RedisInsight. (See part 2 of this tutorial to learn more about using the RedisInsight CLI.)

    RedisGraph

    Step 5: Insert data into a graph

    Insert actors

To interact with RedisGraph you will typically use the GRAPH.QUERY command and execute Cypher queries. Let’s start by inserting some actors into the graph:movies graph, which is created automatically by this command:

    >> GRAPH.QUERY graph:movies "CREATE (:Actor {name:'Mark Hamill', actor_id:1}), (:Actor {name:'Harrison Ford', actor_id:2}), (:Actor {name:'Carrie Fisher', actor_id:3})"

    1) 1) "Labels added: 1"
    2) "Nodes created: 3"
    3) "Properties set: 6"
    4) "Query internal execution time: 0.675400 milliseconds"

    This single query creates three actors, along with their names and unique IDs.

    Insert a movie

    > GRAPH.QUERY graph:movies "CREATE (:Movie {title:'Star Wars: Episode V - The Empire Strikes Back', release_year: 1980 , movie_id:1})"
    1) 1) "Labels added: 1"
    2) "Nodes created: 1"
    3) "Properties set: 3"
    4) "Query internal execution time: 0.392300 milliseconds"

    This single query creates a movie with a title, the release year, and an ID.

    Associate actors and movies

    The core of a graph is the relationships between the nodes, allowing the applications to navigate and query them. Let’s create a relationship between the actors and the movies:

    > GRAPH.QUERY graph:movies "MATCH (a:Actor),(m:Movie) WHERE a.actor_id = 1 AND m.movie_id = 1 CREATE (a)-[r:Acted_in {role:'Luke Skywalker'}]->(m) RETURN r"
    1) 1) "r"
    2) 1) 1) 1) 1) "id"
    2) (integer) 1
    2) 1) "type"
    2) "Acted_in"
    3) 1) "src_node"
    2) (integer) 0
    4) 1) "dest_node"
    2) (integer) 3
    5) 1) "properties"
    2) 1) 1) "role"
    2) "Luke Skywalker"
    3) 1) "Properties set: 1"
    2) "Relationships created: 1"
    3) "Query internal execution time: 0.664800 milliseconds"

    This command created a new relation indicating that the actor Mark Hamill acted in Star Wars: Episode V as Luke Skywalker.

    Let’s repeat this process for the other actors:

    > GRAPH.QUERY graph:movies "MATCH (a:Actor), (m:Movie) WHERE a.actor_id = 2 AND m.movie_id = 1 CREATE (a)-[r:Acted_in {role:'Han Solo'}]->(m) RETURN r"
    > GRAPH.QUERY graph:movies "MATCH (a:Actor), (m:Movie) WHERE a.actor_id = 3 AND m.movie_id = 1 CREATE (a)-[r:Acted_in {role:'Princess Leila'}]->(m) RETURN r"

    You can also do all of this in a single query, for example:

    > GRAPH.QUERY graph:movies "CREATE (:Actor {name:'Marlo Brando', actor_id:4})-[:Acted_in {role:'Don Vito Corleone'}]->(:Movie {title:'The Godfather', release_year: 1972 , movie_id:2})"

    1) 1) "Nodes created: 2"
    2) "Properties set: 6"
    3) "Relationships created: 1"
    4) "Query internal execution time: 0.848500 milliseconds"

    Querying the graph

    Now that you have data in your graph, you’re ready to ask some questions, such as:

    “What are the titles of all the movies?”

    > GRAPH.QUERY graph:movies "MATCH (m:Movie) RETURN m.title"

    1) 1) "m.title"
    2) 1) 1) "Star Wars: Episode V - The Empire Strikes Back"
    2) 1) "The Godfather"
    3) 1) "Query internal execution time: 0.349400 milliseconds"

    “What is the information for the movie with the ID of 1?”

    > GRAPH.QUERY graph:movies "MATCH (m:Movie) WHERE m.movie_id = 1 RETURN m"

    1) 1) "m"
    2) 1) 1) 1) 1) "id"
    2) (integer) 3
    2) 1) "labels"
    2) 1) "Movie"
    3) 1) "properties"
    2) 1) 1) "title"
    2) "Star Wars: Episode V - The Empire Strikes Back"
    2) 1) "release_year"
    2) (integer) 1980
    3) 1) "movie_id"
    2) (integer) 1
    3) 1) "Query internal execution time: 0.365800 milliseconds"

    “Who are the actors in the movie 'Star Wars: Episode V - The Empire Strikes Back' and what roles did they play?”

    > GRAPH.QUERY graph:movies "MATCH (a:Actor)-[r:Acted_in]-(m:Movie) WHERE m.movie_id = 1 RETURN a.name,m.title,r.role"
    1) 1) "a.name"
    2) "m.title"
    3) "r.role"
    2) 1) 1) "Mark Hamill"
    2) "Star Wars: Episode V - The Empire Strikes Back"
    3) "Luke Skywalker"
    2) 1) "Harrison Ford"
    2) "Star Wars: Episode V - The Empire Strikes Back"
    3) "Han Solo"
    3) 1) "Carrie Fisher"
    2) "Star Wars: Episode V - The Empire Strikes Back"
    3) "Princess Leila"
    3) 1) "Query internal execution time: 0.641200 milliseconds"

    Visualizing graph databases with RedisInsight

    If you are using RedisInsight, you can visualize and navigate into the nodes and relationships graphically. Click on the RedisGraph menu entry on the left and enter the query:

    MATCH (m:Actor) return m

Click on the Execute button, and double-click on the actors to follow the relationships. You should see a graph like this one:

    RedisGraph

    Next Steps

    • Learn more about RedisGraph in the Quickstart tutorial.

    Redis JSON

    Redis Stack provides in-memory manipulation of JSON documents at high velocity and volume. With Redis Stack, you can natively store document data in a hierarchical, tree-like format to scale and query documents efficiently, significantly improving performance over storing and manipulating JSON with Lua scripts and core Redis data structures.

    Step 1. Register and subscribe

    Follow this link to register and subscribe to Redis Enterprise Cloud

    Redis JSON

    Step 2. Create a database with Redis JSON Module

    Redis JSON

    Step 3. Connect to a database

    Follow this link to know how to connect to a database

    Step 4. Getting Started with Redis JSON

    The following steps use some basic Redis JSON commands. You can run them from the Redis command-line interface (redis-cli) or use the CLI available in RedisInsight.

    To interact with Redis JSON, you will most often use the JSON.SET and JSON.GET commands. Before using Redis JSON, you should familiarize yourself with its commands and syntax as detailed in the documentation: Redis JSON Commands.

    Let’s go ahead and test drive some JSON-specific operations for setting and retrieving a Redis key with a JSON value:

    • Scalar
    • Objects (including nested objects)
    • Arrays of JSON objects
    • JSON nested objects

    Scalar

Under Redis JSON, a key can contain any valid JSON value: a scalar, an object, or an array. A JSON scalar is a single simple value, such as a string. You use the JSON.SET command to set a JSON value. For new Redis keys the path must be the root, so you will use the “.” path in the example below. For existing keys, when the entire path exists, the value it contains is replaced with the new JSON value. Here you will use JSON.SET to store the scalar string “Hello JSON!” under the key scalar:

    >> JSON.SET scalar .  ' "Hello JSON!" '
    "OK"

    Use JSON.GET to return the value at path in JSON serialized form:

    >> JSON.GET scalar
    "\"Hello JSON!\""

    Objects

    Let’s look at a JSON object example. A JSON object contains data in the form of a key-value pair. The keys are strings and the values are the JSON types. Keys and values are separated by a colon. Each entry (key-value pair) is separated by a comma. The { (curly brace) represents the JSON object:

    {
    "employee": {
    "name": "alpha",
    "age": 40,
    "married": true
    }
    }

    Here is the command to insert JSON data into Redis:

    >> JSON.SET employee_profile . '{ "employee": { "name": "alpha", "age": 40,"married": true }  } '
    "OK"

JSON.GET also supports subcommands that change the reply’s format; all of them default to the empty string:

• INDENT sets the indentation string for nested levels.
• NEWLINE sets the string printed at the end of each line.
• SPACE sets the string placed between a key and a value.

Without them, the document comes back on a single line:

>> JSON.GET employee_profile
"{\"employee\":{\"name\":\"alpha\",\"age\":40,\"married\":true}}"

    Retrieving a part of JSON document

You can also retrieve part of a JSON document from Redis. In the example below, the path “.ans” is passed on the command line to retrieve the value “4”:

    >> JSON.SET object . '{"foo":"bar", "ans":"4" }'
    "OK"
    >> JSON.GET object
    "{\"foo\":\"bar\",\"ans\":\"4\"}"
    >> JSON.GET object .ans
    "\"4\""

    Retrieving the type of JSON data

    JSON.TYPE reports the type of JSON value at path and path defaults to root if not provided. If the key or path do not exist, null is returned.

    >> JSON.TYPE employee_profile
    "Object"

    JSON arrays of objects

    The JSON array represents an ordered list of values. A JSON array can store multiple values, including strings, numbers, or objects. In JSON arrays, values must be separated by a comma. The [ (square bracket) represents the JSON array. Let’s look at a simple JSON array example with four objects:

    {"employees":[
    {"name":"Alpha", "email":"alpha@gmail.com", "age":23},
    {"name":"Beta", "email":"beta@gmail.com", "age":28},
    {"name":"Gamma", "email":"gamma@gmail.com", "age":33},
    {"name":"Theta", "email":"theta@gmail.com", "age":41}
    ]}

    >> JSON.SET testarray . '{"employees":[ {"name":"Alpha", "email":"alpha@gmail.com", "age":23}, {"name":"Beta", "email":"beta@gmail.com", "age":28}, {"name":"Gamma", "email":"gamma@gmail.com", "age":33}, {"name":"Theta", "email":"theta@gmail.com", "age":41} ]} '
    "OK"

>> JSON.GET testarray
"{\"employees\":[{\"name\":\"Alpha\",\"email\":\"alpha@gmail.com\",\"age\":23},{\"name\":\"Beta\",\"email\":\"beta@gmail.com\"....

    JSON nested objects

    A JSON object can also have another object. Here is a simple example of a JSON object having another object nested in it:

    >> JSON.SET employee_info . ' { "firstName": "Alpha",         "lastName": "K", "age": 23,        "address" : {            "streetAddress": "110 Fulbourn Road Cambridge",  "city": "San Francisco", "state": "California", "postalCode": "94016"  } } '
    "OK"
    >> JSON.GET employee_info
    "{\"firstName\":\"Alpha\",\"lastName\":\"K\",\"age\":23,\"address\":{\"streetAddress\":\"110 Fulbourn Road Cambridge\",\"city\":\"San Francisco\",\"state\":\"California\",\"postalCode\":\"94016\"}}"

    Next Steps


    Redis Time Series

Redis Time Series is a Redis module that enhances your experience managing time-series data with Redis. It simplifies the use of Redis for time-series use cases such as internet of things (IoT) data, stock prices, and telemetry. With Redis Time Series, you can ingest and query millions of samples and events at the speed of Redis. Advanced tooling such as downsampling and aggregation ensures a small memory footprint without impacting performance. Use a variety of queries for visualization and monitoring with built-in connectors to popular monitoring tools like Grafana, Prometheus, and Telegraf.

    Step 1. Register and subscribe

    Follow this link to register and subscribe to Redis Enterprise Cloud

    Redistimeseries

    Step 2. Create a database with Redis Time Series Module

    Redistimeseries

    Step 3. Connect to a database

    Follow this link to know how to connect to a database

    Step 4. Getting Started with Redis Time Series

    This section will walk you through using some basic RedisTimeseries commands. You can run them from the Redis command-line interface (redis-cli) or use the CLI available in RedisInsight. (See part 2 of this tutorial to learn more about using the RedisInsight CLI.) Using a basic air-quality dataset, we will show you how to:

    • Create a new time series
    • Add a new sample to the list of series
    • Query a range across one or multiple time series

    Redis Time Series

    Create a new time series

    Let’s create a time series representing air quality dataset measurements. To interact with Redis Time Series you will most often use the TS.RANGE command, but here you will create a time series per measurement using the TS.CREATE command. Once created, all the measurements will be sent using TS.ADD.

The sample commands below create three time series:

    >> TS.CREATE ts:carbon_monoxide
    >> TS.CREATE ts:relative_humidity
    >> TS.CREATE ts:temperature RETENTION 60 LABELS sensor_id 2 area_id 32

In the above example, ts:carbon_monoxide, ts:relative_humidity, and ts:temperature are key names. The ts:temperature series is created with two labels (sensor_id and area_id, with values 2 and 32 respectively) and a retention window of 60 milliseconds.

    Add a new sample data to the time series

Let’s add samples to the time series we just created:

    >> TS.ADD ts:carbon_monoxide 1112596200 2.4
    >> TS.ADD ts:relative_humidity 1112596200 18.3
    >> TS.ADD ts:temperature 1112599800 28.3
    >> TS.ADD ts:carbon_monoxide 1112599800 2.1
    >> TS.ADD ts:relative_humidity 1112599800 13.5
    >> TS.ADD ts:temperature 1112603400 28.5
    >> TS.ADD ts:carbon_monoxide 1112603400 2.2
    >> TS.ADD ts:relative_humidity 1112603400 13.1
    >> TS.ADD ts:temperature 1112607000 28.7

    Querying the sample

    Now that you have sample data in your time series, you’re ready to ask questions such as:

    “How do I get the last sample?”

    TS.GET is used to get the last sample. The returned array will contain the last sample timestamp followed by the last sample value, when the time series contains data:

    >> TS.GET ts:temperature
    1) (integer) 1112607000
    2) "28.7"

    “How do I get the last sample matching the specific filter?”

    TS.MGET is used to get the last samples matching the specific filter:

    >> TS.MGET FILTER area_id=32
    1) 1) "ts:temperature"
    2) (empty list or set)
    3) 1) (integer) 1112607000
    2) "28.7"

    “How do I get the sample with labels matching the specific filter?”

    >> TS.MGET WITHLABELS FILTER area_id=32
    1) 1) "ts:temperature"
    2) 1) 1) "sensor_id"
    2) "2"
    2) 1) "area_id"
    2) "32"
    3) 1) (integer) 1112607000
    2) "28.7"

    Query a range across one or more time series

TS.RANGE is used to query a range in the forward direction, while TS.REVRANGE queries a range in the reverse direction. They let you answer questions such as:

    “How do I get the sample for a time range?”

    >> TS.RANGE ts:carbon_monoxide 1112596200 1112603400
    1) 1) (integer) 1112596200
    2) "2.4"
    2) 1) (integer) 1112599800
    2) "2.1"
    3) 1) (integer) 1112603400
    2) "2.2"

    Aggregation

You can use various aggregation types such as avg, sum, min, max, range, count, first, and last. The example below shows how to use the “avg” aggregation type to answer questions such as:

    “How do I get the sample for a time range on some aggregation rule?”

    >> TS.RANGE ts:carbon_monoxide 1112596200 1112603400 AGGREGATION avg 2
    1) 1) (integer) 1112596200
    2) "2.4"
    2) 1) (integer) 1112599800
    2) "2.1"
    3) 1) (integer) 1112603400
    2) "2.2"

    Next Steps

    • Learn more about Redis Time Series in the Quickstart tutorial.

    Argo CD: What It Is And Why It Should Be Part of Your Redis CI/CD Pipeline


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis
    Profile picture for Talon Miller
    Author:
    Talon Miller, Technical Product Marketing Manager at Redis

What is Argo CD?

    Argo CD is a combination of the two terms “Argo” and “CD,” Argo being an open source container-native workflow engine for Kubernetes. It is a CNCF-hosted project that provides an easy way to combine all three modes of computing—services, workflows, and event-based—all of which are very useful for creating jobs and applications on Kubernetes. It is an engine that makes it easy to specify, schedule, and coordinate the running of complex workflows and applications on Kubernetes. The CD in the name refers to continuous delivery, which is an extension of continuous integration (CI) since it automatically deploys all code changes to a testing and/or production environment after the build stage.

    preview image

    Argo CD follows GitOps methodology. GitOps is a CD methodology centered around using Git as a single source of truth for declarative infrastructure and application code. It watches a remote Git repository for new or updated manifest files and synchronizes those changes with the cluster. By managing manifests in Git and syncing them with the cluster, you get all the advantages of a Git-based workflow (version control, pull-request reviews, transparency in collaboration, etc.) and a one-to-one mapping between what is in the Git repo and what is deployed in the cluster.

Argo CD empowers organizations to declaratively build and run cloud native applications and workflows on Kubernetes using GitOps. It is a pull-based, declarative, GitOps continuous delivery tool for Kubernetes with a fully loaded UI. The tool reads your environment configuration from your Git repository and applies it to your Kubernetes namespaces. App definitions, environments, and configurations should be declarative and version controlled. App deployment and life cycle management should be automated, auditable, and easy to understand.

    Built specifically to make the continuous deployment process to a Kubernetes cluster simpler and more efficient, Argo CD solves multiple challenges, such as the need to set up and install additional tools outside of Jenkins for a complete CI/CD process to Kubernetes, the need to configure access control to Kubernetes in and out (including cloud platforms), the need to have visibility of deployment status once a new app gets pushed to a Kubernetes cluster, and more.

    note

    Argo CD isn’t just deployed inside of Kubernetes but should be looked at as an extension of Kubernetes as it uses existing Kubernetes resources and functionalities like etcd and controllers to store data and monitor real-time updates of application state.

    How does Argo CD work?

    how argocd work

    It solves the problems above in the CD process by being a more integral part of the Kubernetes cluster. Instead of pushing changes to the Kubernetes cluster, Argo CD pulls Kubernetes manifest changes and applies them to the cluster. Once you set up Argo CD inside of your Kubernetes cluster, you configure Argo CD to connect and track your Git repo changes.

If any changes are detected, Argo CD applies those changes automatically to the cluster. Developers can now simply commit code; a CI tool (for example, Jenkins) will automatically build a new image, push it to the Docker repo, and then update the Kubernetes manifest file, which Argo CD will automatically pull in, ultimately saving manual work, reducing the initial setup configuration, and eliminating security risks. But what about DevOps teams making other changes to the application configuration? Whatever manifest files are connected to the Git repo will be tracked and synced by Argo CD and pulled into the Kubernetes cluster, providing a single flexible deployment tool for both developers and DevOps.

Argo CD watches for changes in the cluster as well, which becomes important if someone updates the cluster manually. Argo CD will detect the divergence between the state of the cluster and the Git repo: it compares the desired configuration in the Git repo with the actual state in the Kubernetes cluster and syncs what is defined in the Git repo to the cluster, overriding whatever update was done manually and guaranteeing that the Git repo remains the single source of truth. Of course, Argo CD is flexible enough to be configured not to automatically override manual updates, and to send an alert instead, in case a quick update needs to happen directly on the cluster.

    What are Argo CD’s capabilities?

• Argo CD is a very simple and efficient way to get declarative, version-controlled application deployments: it automatically monitors and pulls manifest changes from the Git repo, and it also offers easy rollback and revert to a previous state, without manually reverting every update in the cluster.

    • It also provides automation and traceability via GitOps workflow with a web UI for visualizing Kubernetes resources.

    • Argo CD defines Kubernetes manifests in different ways: it supports Kubernetes YAML files, Helm Charts, Kustomize, and other template files that generate Kubernetes manifests.

    • Argo CD also has a command-line interface application, a Grafana metrics dashboard, and audit trails for application events and API calls.

• Argo CD has a very simple cluster disaster recovery process: because of its declarative nature, pointing a new cluster at the Git repo after a cluster has gone down will automatically recreate the exact same state without any intervention.

    • Kubernetes cluster access control with Git and Argo CD is simple—you can configure who can commit merge requests and who can actually approve those merge requests, providing a clean way to manage cluster access indirectly via Git. There is also no need to give external access to other tools like Jenkins, keeping cluster credentials safe because Argo CD is already running in the cluster and is the only tool that actually applies changes.

    Prerequisites:

    • Install Docker Desktop
    • Enable Kubernetes
    • Install Argo CD

    Getting Started

Step 1. Install the Argo CD CLI

    brew install argocd

    Step 2. Create a new namespace

    Create a namespace argocd where all Argo CD resources will be installed.

    kubectl create namespace argocd

    Step 3. Install Argo CD resources

    kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml

    kubectl get po -n argocd
    NAME READY STATUS RESTARTS AGE
    argocd-application-controller-0 0/1 ContainerCreating 0 3m9s
    argocd-dex-server-65bf5f4fc7-5kjg6 0/1 Init:0/1 0 3m13s
    argocd-redis-d486999b7-929q9 0/1 ContainerCreating 0 3m13s
    argocd-repo-server-8465d84869-rpr9n 0/1 Init:0/1 0 3m12s
    argocd-server-87b47d787-gxwlb 0/1 ContainerCreating 0 3m11s

    Step 4. Ensure that all Pods are up and running

    kubectl get po -n argocd
    NAME READY STATUS RESTARTS AGE
    argocd-application-controller-0 1/1 Running 0 5m25s
    argocd-dex-server-65bf5f4fc7-5kjg6 1/1 Running 0 5m29s
    argocd-redis-d486999b7-929q9 1/1 Running 0 5m29s
    argocd-repo-server-8465d84869-rpr9n 1/1 Running 0 5m28s
    argocd-server-87b47d787-gxwlb 1/1 Running 0 5m27s

    Step 5. Configure Port Forwarding for Dashboard Access

    kubectl port-forward svc/argocd-server -n argocd 8080:443
    Forwarding from 127.0.0.1:8080 -> 8080
    Forwarding from [::1]:8080 -> 8080

    access argocd

    Step 6. Log in

    kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d; echo

    HcD1I0XXXXXQVrq-

    login

Step 7. Install the Argo CD CLI on Mac using Homebrew (skip this step if you already installed the CLI in Step 1)

    brew install argocd

    Step 8. Access the Argo CD API Server

By default, the Argo CD API server is not exposed with an external IP. To access the API server, one option (besides the port forwarding configured earlier) is to change the service type to LoadBalancer:

    kubectl patch svc argocd-server -n argocd -p '{"spec": {"type": "LoadBalancer"}}'
    service/argocd-server patched

    Step 9. Log in to Argo CD

    argocd login localhost
    WARNING: server certificate had error: x509: certificate signed by unknown authority. Proceed insecurely (y/n)? y
    Username: admin
    Password:
    'admin:login' logged in successfully
    Context 'localhost' updated

    Step 10. Update the password

    % argocd account update-password
    *** Enter password of currently logged in user (admin):
    *** Enter new password for user admin:
    *** Confirm new password for user admin:
    Password updated
    Context 'localhost' updated
    ajeetraina@Ajeets-MacBook-Pro ~ %

    Step 11. Register a Cluster to Deploy Apps to

As we are running on Docker Desktop, we will add the docker-desktop context accordingly.

    argocd cluster add docker-desktop
    WARNING: This will create a service account `argocd-manager` on the cluster referenced by context `docker-desktop` with full cluster level admin privileges. Do you want to continue [y/N]? y
    INFO[0002] ServiceAccount "argocd-manager" created in namespace "kube-system"
    INFO[0002] ClusterRole "argocd-manager-role" created
    INFO[0002] ClusterRoleBinding "argocd-manager-role-binding" created
    Cluster 'https://kubernetes.docker.internal:6443' added

    Step 12. Create a New Redis application

Click “Create” and provide the repository URL https://github.com/argoproj/argo-cd with the path manifests/base/redis.
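If you prefer the CLI over the dashboard, a roughly equivalent application can be created with the argocd CLI (values taken from the app list output shown below):

argocd app create redisdemo \
  --repo https://github.com/argoproj/argo-cd \
  --path manifests/base/redis \
  --dest-server https://kubernetes.docker.internal:6443 \
  --dest-namespace argocd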

    create new redis app

argocd app

    argocd app list
    NAME CLUSTER NAMESPACE PROJECT STATUS HEALTH SYNCPOLICY CONDITIONS REPO PATH TARGET
    redisdemo https://kubernetes.docker.internal:6443 argocd default Synced Healthy <none> <none> https://github.com/argoproj/argo-cd manifests/base/redis HEAD


    redisdemo app

    summary of the app

    health of the redisdemo app

    Step 13. Delete the Redis app

    argocd app delete redisdemo

    Example: Voting App

Let’s try to deploy a voting app. The voting application accepts only one vote per client: once a vote has been submitted from a client, further votes from that client are not registered. The app consists of:

    voting app

    • A Python web app which lets you vote between two options
    • A Redis queue which collects new votes
    • A .NET worker which consumes votes and stores them in …
    • A Postgres database backed by a Docker volume
    • A Node.js web app which shows the results of the voting in real time

    Go to the Argo CD dashboard, enter the repository URL, and supply the right PATH. Click “Create App” to deploy the application on your Docker Desktop.

    enter the repo

    Next, visualize the complete application by choosing “Group by Node.”

    votingapp status

    Step 14. Grouping by Parent Resources

    parent resources and grouping

Keep a close watch on the events by clicking on the “Events” section.

    events

    Below is the complete overview of the voting application.

    overview

    Access the app via http://localhost:31000/

    access app

    The results are accessible via http://localhost:31001/

    results

    Additional References:

    - + \ No newline at end of file diff --git a/operate/continuous-integration-continuous-deployment/circleci/index.html b/operate/continuous-integration-continuous-deployment/circleci/index.html index 05b03feb1e..bbf0feb3d3 100644 --- a/operate/continuous-integration-continuous-deployment/circleci/index.html +++ b/operate/continuous-integration-continuous-deployment/circleci/index.html @@ -4,7 +4,7 @@ CircleCI: What It Is and Why It Should Be Part of Your Redis CI/CD | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    CircleCI: What It Is and Why It Should Be Part of Your Redis CI/CD


    Profile picture for Talon Miller
    Author:
    Talon Miller, Technical Product Marketing Manager at Redis
    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    What is CircleCI?

    CircleCI and Redis logos

CircleCI is a CI/CD platform built by DevOps professionals to help you fine-tune your entire development process from start to finish. It helps engineering teams build, test, and deploy software while checking code changes in real time on the CircleCI dashboard UI. You get more control over pipelines with per-project CI/CD configuration, workflows that decide when and how jobs run, and data- and image-caching options that optimize continuous delivery.

CircleCI supports many different languages and a variety of cloud-hosted compute types (Docker, Linux VMs, macOS, Windows, and more) for a simplified approach to infrastructure. With the CircleCI dashboard, it's possible to gather insights on build changes to continuously optimize pipelines.

    To deploy, CircleCI takes artifacts from your pipeline and sends them to wherever you need them deployed, whether it's Docker, Heroku, Kubernetes, cloud platforms, and more.

    How does CircleCI work?

    CircleCI Overview

After GitHub or Bitbucket is authorized and added as a project on circleci.com, every code change triggers CircleCI jobs: CircleCI finds and runs config.yml, tests the build, runs security scans, goes through approval steps, and then deploys. CircleCI sends an email notification of success or failure after the tests complete.

    What are CircleCI’s capabilities?

    As a DevOps engineer or developer, you can:

    • SSH into any job to debug build issues.
    • Set up jobs to run in parallel to reduce time.
• Configure a cache with two simple keys to reuse data from previous jobs in your workflow (see the sketch after this list).
    • Configure self-hosted runners for unique platform support.
    • Access Arm resources for the machine executor.
    • Use reusable packages of configuration to integrate with third parties.
    • Use a pre-built Redis Docker image in a variety of languages.
    • Use the API to retrieve information about jobs and workflows.
    • Use the CLI to access advanced tools locally.
    • Get flaky test detection with test insights.
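As an illustration of the caching capability mentioned above, a hypothetical job fragment using CircleCI's restore_cache/save_cache steps might look like this (the cache key names are made up for the example):

# Fragment of a job's steps (not a complete config.yml)
- restore_cache:
    keys:
      - deps-v1-{{ checksum "package-lock.json" }}
      - deps-v1-
- run: npm install
- save_cache:
    key: deps-v1-{{ checksum "package-lock.json" }}
    paths:
      - node_modules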

    Deploy a Redis Rate Limiting application on Heroku using CircleCI

    Prerequisites:

    • A CircleCI account
    • A GitHub account
    • A Heroku account

    Getting started

    In this demo, we will be using the Redis Rate Limiting app built using Python and Redis.

    Rate limiting is a mechanism that many developers may have to deal with at some point in their life. It’s useful for a variety of purposes, such as sharing access to limited resources or limiting the number of requests made to an API endpoint and responding with a 429 status code. The complete source code of the project is hosted over at GitHub. In this example, we will configure CircleCI to deploy the Rate Limiting app directly on the Heroku platform.

    Step 1. Log in to CircleCI

    Go to https://circleci.com and log in using your GitHub account:

    CircleCI Login

    Choose your preferred login method. To make it easier, let us choose “GitHub” for this demonstration.

    Step 2. Verify your permissions on GitHub

    Authenticating

    Step 3. Select your project repository and click “Setup Project”

    We will be using the Redis rate-limiting project for this demonstration.

    Selecting the project

    Step 4. Create a new CircleCI configuration file

    CircleCI believes in configuration as code. As a result, the entire delivery process from build to deploy is orchestrated through a single file called config.yml. The config.yml file is located in a folder called .circleci at the top of your project. CircleCI uses the YAML syntax for config.

    Selecting a config file

As we haven’t yet created a config.yml file, let’s choose the “Fast” option to create a new config.yml from an editable template.

    Selecting the fast option

    Once you click “Set Up Project” it will ask you to select sample configs as shown in the following screenshot:

    Sample configs

    Add the following content under .circleci/config.yml and save the file:

version: 2.1
orbs:
  heroku: circleci/heroku@1.2.6
workflows:
  heroku_deploy:
    jobs:
      - heroku/deploy-via-git

    In the configuration above, we pull in the Heroku orb circleci/heroku@1.2.6, which automatically gives us access to a powerful set of Heroku jobs and commands.

    One of those jobs is heroku/deploy-via-git, which deploys your application straight from your GitHub repo to your Heroku account.

    Step 5. Merge the pull request

Once you commit the new config, CircleCI will ask you to raise a new PR. Go ahead and merge the changes now.

    Open a pull request

    Step 6. Set up a Heroku account

Follow these steps to set up a Heroku account and create a new app named rate-limit-python. You will need your Heroku API key for this demo.

    Set up a Heroku account

    Step 7. Configuring Heroku access on CircleCI

    Before you push your project to Heroku from CircleCI, you will need to configure an authenticated handshake between CircleCI and Heroku. Configure the handshake by creating two environment variables in the settings for your CircleCI project:

• HEROKU_APP_NAME is the name of your Heroku application (in this case, rate-limit-python)
    • HEROKU_API_KEY is your Heroku account API key. This can be found under the Account tab of your Heroku account under Account Settings. Scroll to the API Key section and click Reveal to copy your API key.

    Step 8. Set up the environment variables on CircleCI

    On the sidebar menu of the settings page, click Environment Variables under Build Settings. On the environment variables page, create two variables named HEROKU_APP_NAME and HEROKU_API_KEY and enter the values for them.

    With these in place, our CircleCI configuration will be able to make authenticated deployments to the Heroku platform.

    Heroku Environment Variables

    Step 9. Trigger the build

As soon as you merge the pull request, CircleCI will trigger the build automatically.

    Triggering the build

    You should now be able to access your application.

    remote:        Collecting attrs>=19.2.0
    remote: Downloading attrs-21.4.0-py2.py3-none-any.whl (60 kB)
    remote: Collecting wrapt<2,>=1.10
    remote: Downloading wrapt-1.14.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (77 kB)
    remote: Collecting pyparsing!=3.0.5,>=2.0.2
    remote: Downloading pyparsing-3.0.7-py3-none-any.whl (98 kB)
    remote: Installing collected packages: wrapt, pyparsing, typing-extensions, sqlparse, packaging, deprecated, async-timeout, asgiref, tomli, redis, py, pluggy, iniconfig, h11, Django, click, attrs, uvloop, uvicorn, urllib3, sniffio, rfc3986, pytz, python-dotenv, pytest, django-redis, django-ipware, django-cors-headers
    remote: Successfully installed Django-4.0.3 asgiref-3.5.0 async-timeout-4.0.2 attrs-21.4.0 click-8.1.0 deprecated-1.2.13 django-cors-headers-3.11.0 django-ipware-4.0.2 django-redis-5.2.0 h11-0.13.0 iniconfig-1.1.1 packaging-21.3 pluggy-1.0.0 py-1.11.0 pyparsing-3.0.7 pytest-7.1.1 python-dotenv-0.20.0 pytz-2022.1 redis-4.2.0 rfc3986-2.0.0 sniffio-1.2.0 sqlparse-0.4.2 tomli-2.0.1 typing-extensions-4.1.1 urllib3-1.26.9 uvicorn-0.17.6 uvloop-0.16.0 wrapt-1.14.0
    remote: -----> $ python server/manage.py collectstatic --noinput
    remote: 133 static files copied to '/tmp/build_3850bcfb/server/static_root'.
    remote:
    remote: -----> Discovering process types
    remote: Procfile declares types -> web
    remote:
    remote: -----> Compressing...
    remote: Done: 75.3M
    remote: -----> Launching...
    remote: Released v11
    remote: https://*****************.herokuapp.com/ deployed to Heroku

    Application running

    Additional references:

    - + \ No newline at end of file diff --git a/operate/continuous-integration-continuous-deployment/index.html b/operate/continuous-integration-continuous-deployment/index.html index 95cc1b59fa..cedfd3e42d 100644 --- a/operate/continuous-integration-continuous-deployment/index.html +++ b/operate/continuous-integration-continuous-deployment/index.html @@ -4,7 +4,7 @@ Continuous Integration/Deployment | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Continuous Integration/Deployment

    The following links show you the different ways to embed Redis into your continuous integration and continuous deployment process.

    Learn how to deploy Redis Enterprise Database from a Jenkins Pipeline
Learn how to use CircleCI to deploy a Redis application
Learn about Argo CD and implement a voting app using Redis
    - + \ No newline at end of file diff --git a/operate/continuous-integration-continuous-deployment/jenkins/index.html b/operate/continuous-integration-continuous-deployment/jenkins/index.html index 2758170927..956719cae6 100644 --- a/operate/continuous-integration-continuous-deployment/jenkins/index.html +++ b/operate/continuous-integration-continuous-deployment/jenkins/index.html @@ -4,7 +4,7 @@ How to Deploy a Redis Enterprise Database from a Jenkins Pipeline | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    How to Deploy a Redis Enterprise Database from a Jenkins Pipeline


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis
    Profile picture for Matthew Royal
    Author:
    Matthew Royal, Consulting Engineer at Redis

Jenkins is currently the most popular CI tool, with ~15M users. It is an open source automation server which enables developers to reliably build, test, and deploy their software. It was forked in 2011 from a project called Hudson after a dispute with Oracle, and is used for Continuous Integration and Continuous Delivery (CI/CD) and test automation. Jenkins is based on Java and provides over 1,700 plugins to automate your developer workflow and save time on repetitive tasks.

    image

    Source: Datanyze market analysis

    Jenkins Pipeline performs Continuous Delivery tasks declared in a Jenkinsfile stored alongside your project's code. The Pipeline plugin has a fairly comprehensive tutorial checked into its source tree. Using Pipeline, you can configure Jenkins to automatically deploy key pieces of infrastructure, such as a Redis database.

    Architecture

    Jenkins Pipelines are the Continuous Delivery (CD) side of Jenkins. They use a Jenkinsfile declarative script to define the behavior of the pipeline. You can script actions in Groovy and run shell scripts from it, so you can make it do pretty much anything.

    The Jenkinsfile instructs Jenkins to export some environment variables from the Credentials store in order to connect to the Redis server, then executes the Python pipeline script with the Deployment Configuration file given as a parameter. An example deployment-configuration-file.json looks like:

{
  "database": {
    "name": "made-with-jenkins",
    "port": 12345,
    "size": "S",
    "operation": "CREATE"
  }
}

    The Python script uses predefined JSON template files that create Redis databases of fixed t-shirt sizes (S, M, L, XL). The Deployment Config file tells the Python script what the desired database name, port, and size are. A sample template file looks like:

{
  "name": "{NAME}",
  "type": "redis",
  "memory_size": 343597383
}

    The following is an architectural diagram of how a Jenkins pipeline adds a database to a Redis cluster.

    Architecture diagram

    Process

    1. The Jenkins pipeline clones a remote git repository, containing the application code and the pipeline code.
    2. The Redis host, port, user, and password are decrypted from the credentials store and are exported as environment variables.
    3. Jenkins runs the Python pipeline script, specifying the deployment configuration file in the git repo.
4. The Python script uses the deployment configuration file to choose and customize a pre-populated template to use as the body of the REST create-database request to Redis (a sketch of this step follows below).
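For illustration only, a stripped-down sketch of step 4 might look like the following. The template file naming and glue logic here are assumptions for the sketch (the actual script lives in the repo linked at the end of this article); the REST call itself targets the Redis Enterprise API, which creates a database via a POST to /v1/bdbs:

import json
import os

import requests

# Load the desired database settings from the deployment configuration file
with open('deployment-configuration-file.json') as f:
    config = json.load(f)['database']

# Pick the t-shirt-size template and substitute the database name
# (the template file name is an assumption for this sketch)
with open(f"template-{config['size']}.json") as f:
    body = json.loads(f.read().replace('{NAME}', config['name']))
body['port'] = config['port']

# Credentials come from the environment variables exported by the Jenkinsfile
resp = requests.post(
    f"{os.environ['REDIS_SERVER_FQDN']}:{os.environ['REDIS_SERVER_PORT']}/v1/bdbs",
    json=body,
    auth=(os.environ['REDIS_USER'], os.environ['REDIS_PASS']),
    verify=False,  # clusters commonly use self-signed certificates
)
resp.raise_for_status()
print(resp.json())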

    List of Pipeline Code Files

    Configuring Jenkins

    Installing Jenkins

    You can use Docker Desktop to quickly get a Jenkins instance up and running, exposing ports 8080 (web GUI) and 50000 (inbound agents).

    docker run --name jenk -p 8080:8080 -p 50000:50000 jenkins/jenkins:lts-jdk11

    The installation will generate a first-run password in the docker-cli output.

    Then open http://localhost:8080/ and enter the password to unlock your instance and begin installation.

    Beginning installation of Jenkins

    Choose "Install suggested plugins"

    Installing plugins

    Wait for the plugins to complete the installation process.

    Plugin installation

    Next, you’re prompted to create your admin user.

    Creating an admin user

    Congratulations! Jenkins is ready!

    Jenkins ready for use

    Installing Python and custom libraries

    If you use an existing instance of Jenkins, you can install Python and the custom libraries from the command line interface of that machine.

    Docker instances of Jenkins can be accessed by shell using the following command:

    docker exec -it -u root jenk bash

The Python pipeline script requires Python 3 along with the click and requests libraries.

    apt-get update
    apt-get install -y python3-pip

    pip install --upgrade pip
    pip install click
    pip install requests

    Alternatively, if you are creating a new Jenkins from scratch, you can include these dependencies in a separate Dockerfile that builds off the base Jenkins image:

FROM jenkins/jenkins:lts-jdk11
USER root
RUN apt-get update
RUN apt-get install -y python3-pip

# Install app dependencies
RUN pip3 install --upgrade pip
RUN pip3 install click
RUN pip3 install requests

    Add credentials to Secret Store

    Using the left-side menu, select Manage Jenkins, then select Manage Credentials, then click the link (global).

    Adding credentials

    Adding credentials

    Adding credentials

    From here, you can specify Kind: Secret text for the 4 secrets required to connect with the Redis REST endpoint:

    • REDIS_SERVER_FQDN
      • Set to the 'https://server-address' of the target Redis instance.
    • REDIS_SERVER_PORT
      • Set to the Redis REST API port (default 9443).
    • REDIS_USER
      • Set to the Redis admin user allowed to create databases.
    • REDIS_PASS
      • Set to the Redis admin user's password.

    Managing secrets

    If you are using a private code repository, you may also wish to include a Personal Access Token here.

    Create the Jenkins pipeline

    From the dashboard, click New Item.

    Creating a new pipeline

    Enter in a name for the pipeline, and choose the Pipeline type.

    Naming the pipeline

    Connect GitHub repository

    From the Pipeline configuration page that appears, check the GitHub box and enter the git clone URL, complete with any credentials needed to read the repository. For GitHub access, the password should be a Personal Access Token rather than the actual user password.

    Configuring GitHub access

    Redis pipeline Jenkinsfile

Scrolling down on this page to the Advanced Project Options, you can either paste in the Jenkinsfile, or you can specify the filename if the file exists in the git repository.

    Specifying your Jenkinsfile

Here is an example Jenkinsfile containing the mapping of credentials to environment variables, and two separate stages: a Hello World stage which always succeeds, and a build stage that invokes the Python script. Paste this into the pipeline script section.

pipeline {
    agent any

    environment {
        REDIS_SERVER_FQDN = credentials('REDIS_SERVER_FQDN')
        REDIS_SERVER_PORT = credentials('REDIS_SERVER_PORT')
        REDIS_USER = credentials('REDIS_USER')
        REDIS_PASS = credentials('REDIS_PASS')
    }

    stages {
        stage('Hello') {
            steps {
                echo 'Hello World'
            }
        }

        stage('build') {
            steps {
                git branch: 'main', url: 'https://github.com/masyukun/redis-jenkins-pipeline.git'
                sh 'python3 jenkins-re-pipeline.py --deployfile deployment-configuration-file.json'
            }
        }
    }
}

    Click "Save" when the job spec is complete.

    Run the Jenkins pipeline

    Click on the pipeline you created:

    Selecting your pipeline

    Click the "Build Now" icon on the left side menu.

    Building your pipeline

    Click the Status icon on the left side menu in order to see the results of all the output from each of the stages of your pipeline.

    Pipeline status

    Hover over the build stage and click the Logs button of the most recent build in order to see the Python script’s output.

    Viewing the Python logs

    Sample output: you should see a verbose response from Redis’s REST service in the “Shell Script” accordion pane.

    There’s also a “Git” output log, in case you need to debug something at that level. Any time you update the branch in the remote git repository, you should see evidence in that log that the latest changes have successfully checked out into the local Jenkins git repository.

    Viewing the git log

    Open your Redis Enterprise Secure Management UI at https://servername:8443 and click on the databases menu item to verify that your database was created with the name, port, and size specified in the deployment-configuration-file.json file.

    Verifying database creation

    Congratulations! You have deployed a Redis Enterprise database using a Jenkins Pipeline!

    The GitHub repository is currently: https://github.com/masyukun/redis-jenkins-pipeline

    Further Reading

    - + \ No newline at end of file diff --git a/operate/docker/nodejs-nginx-redis/index.html b/operate/docker/nodejs-nginx-redis/index.html index 09003af019..b0c45cb68c 100644 --- a/operate/docker/nodejs-nginx-redis/index.html +++ b/operate/docker/nodejs-nginx-redis/index.html @@ -4,7 +4,7 @@ How to build and run a Node.js application using Nginx, Docker and Redis | The Home of Redis Developers - + @@ -13,7 +13,7 @@

    How to build and run a Node.js application using Nginx, Docker and Redis


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

Thanks to Node.js, millions of frontend developers who write JavaScript for the browser are now able to write server-side code in addition to client-side code without needing to learn a completely different language. Node.js is a free, open source, cross-platform JavaScript runtime environment. It is capable of handling thousands of concurrent connections with a single server without introducing the burden of managing thread concurrency, which could be a significant source of bugs.

    Nginx-node

    In this quickstart guide, you will see how to build a Node.js application (visitor counter) using Nginx, Redis and Docker.

    What do you need?

    • Node.js: An open-source, cross-platform, back-end JavaScript runtime environment that runs on the V8 engine and executes JavaScript code outside a web browser.
    • Nginx: An open source software for web serving, reverse proxying, caching, load balancing, media streaming, and more.
    • Docker: a containerization platform for developing, shipping, and running applications.
    • Docker Compose: A tool for defining and running multi-container Docker applications.

    Project structure

.
├── docker-compose.yml
├── redis
├── nginx
│   ├── Dockerfile
│   └── nginx.conf
├── web1
│   ├── Dockerfile
│   ├── package.json
│   └── server.js
└── web2
    ├── Dockerfile
    ├── package.json
    └── server.js

    Prerequisites:

    – Install Docker Desktop

Use Docker's install guide to set up Docker Desktop for Mac or Windows on your local system.

Docker Desktop

    info

Docker Desktop comes with Docker Compose installed by default, so you don't need to install it separately.

    Step 1. Create a Docker compose file

Create a file named docker-compose.yml with the following content:

version: '3.9'
services:
  redis:
    image: 'redis:alpine'
    ports:
      - '6379:6379'
  web1:
    restart: on-failure
    build: ./web1
    hostname: web1
    ports:
      - '81:5000'
  web2:
    restart: on-failure
    build: ./web2
    hostname: web2
    ports:
      - '82:5000'
  nginx:
    build: ./nginx
    ports:
      - '80:80'

The compose file defines an application with four services: redis, web1, web2 and nginx. When deploying the application, docker-compose maps port 80 of the nginx service container to port 80 of the host, as specified in the file.

    info

By default, Redis runs on port 6379. Make sure no other instance of Redis is running on your system and that host port 6379 is not already in use by another container; otherwise, change the port mapping.
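For example, to avoid a clash you could map a different host port to the container's 6379 in docker-compose.yml; the other services still reach Redis on port 6379 inside the Compose network:

  redis:
    image: 'redis:alpine'
    ports:
      - '6380:6379' # host port 6380 -> container port 6379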

    Step 2. Create an nginx directory and add the following files:

    File: nginx/nginx.conf

upstream loadbalancer {
  server web1:5000;
  server web2:5000;
}

server {
  listen 80;
  server_name localhost;
  location / {
    proxy_pass http://loadbalancer;
  }
}

File: nginx/Dockerfile

    FROM nginx:1.21.6
    RUN rm /etc/nginx/conf.d/default.conf
    COPY nginx.conf /etc/nginx/conf.d/default.conf

Step 3. Create web1 and web2 directories and add the following files to each (shown here for web1; web2 is identical):

File: web1/Dockerfile

    FROM node:14.17.3-alpine3.14

    WORKDIR /usr/src/app

    COPY ./package.json ./
    RUN npm install
    COPY ./server.js ./

    CMD ["npm","start"]

File: web1/package.json


    "name": "web",
    "version": "1.0.0",
    "description": "Running Node.js and Express.js on Docker",
    "main": "server.js",
    "scripts": {
    "start": "node server.js"
    },
    "dependencies": {
    "express": "^4.17.2",
    "redis": "3.1.2"
    },
    "author": "",
    "license": "MIT"
    }

File: web1/server.js

const express = require('express');
const redis = require('redis');
const os = require('os');

const app = express();

// Connect to the "redis" service defined in docker-compose.yml
const redisClient = redis.createClient({
  host: 'redis',
  port: 6379,
});

app.get('/', function (req, res) {
  redisClient.get('numVisits', function (err, numVisits) {
    let numVisitsToDisplay = parseInt(numVisits) + 1;
    if (isNaN(numVisitsToDisplay)) {
      numVisitsToDisplay = 1;
    }
    // Prefix with the container hostname (web1/web2) so you can see
    // nginx load balancing between the two instances
    res.send(os.hostname() + ': Total number of visits is: ' + numVisitsToDisplay);
    redisClient.set('numVisits', numVisitsToDisplay);
  });
});

app.listen(5000, function () {
  console.log('Web application is listening on port 5000');
});

    Step 4. Deploy the application

    Let us deploy the full-fledged app using docker-compose:

    $ docker-compose up -d
    Creating nginx-nodejs-redis_redis_1 ... done
    Creating nginx-nodejs-redis_web1_1 ... done
    Creating nginx-nodejs-redis_web2_1 ... done
    Creating nginx-nodejs-redis_nginx_1 ... done

    Expected result

List the running containers. You should see four containers running with the port mappings shown below:

docker-compose ps
           Name                       Command              State           Ports
--------------------------------------------------------------------------------------
nginx-nodejs-redis_nginx_1   /docker-entrypoint.sh ngin…    Up     0.0.0.0:80->80/tcp
nginx-nodejs-redis_redis_1   docker-entrypoint.sh redis…    Up     0.0.0.0:6379->6379/tcp
nginx-nodejs-redis_web1_1    docker-entrypoint.sh npm s…    Up     0.0.0.0:81->5000/tcp
nginx-nodejs-redis_web2_1    docker-entrypoint.sh npm s…    Up     0.0.0.0:82->5000/tcp

    Step 5. Testing the app

    After the application starts, navigate to http://localhost in your web browser or run:

$ curl localhost:80
web1: Total number of visits is: 1
$ curl localhost:80
web1: Total number of visits is: 2
$ curl localhost:80
web2: Total number of visits is: 3
$ curl localhost:80
web2: Total number of visits is: 4

    Step 6. Monitoring Redis keys

If you want to monitor the Redis keys, you can use the MONITOR command. Install redis-cli on your Mac using brew install redis, then connect directly to the Redis container by issuing the following command:

    % redis-cli
    127.0.0.1:6379> monitor
    OK
    1646485507.290868 [0 172.24.0.2:34330] "get" "numVisits"
    1646485507.309070 [0 172.24.0.2:34330] "set" "numVisits" "5"
    1646485509.228084 [0 172.24.0.2:34330] "get" "numVisits"
    1646485509.241762 [0 172.24.0.2:34330] "set" "numVisits" "6"
    1646485509.619369 [0 172.24.0.4:52082] "get" "numVisits"
    1646485509.629739 [0 172.24.0.4:52082] "set" "numVisits" "7"
    1646485509.990926 [0 172.24.0.2:34330] "get" "numVisits"
    1646485509.999947 [0 172.24.0.2:34330] "set" "numVisits" "8"
    1646485510.270934 [0 172.24.0.4:52082] "get" "numVisits"
    1646485510.286785 [0 172.24.0.4:52082] "set" "numVisits" "9"
    1646485510.469613 [0 172.24.0.2:34330] "get" "numVisits"
    1646485510.480849 [0 172.24.0.2:34330] "set" "numVisits" "10"
    1646485510.622615 [0 172.24.0.4:52082] "get" "numVisits"
    1646485510.632720 [0 172.24.0.4:52082] "set" "numVisits" "11"
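Alternatively, if you don't want to install redis-cli on the host, you can run it inside the Redis container itself (container name taken from the docker-compose ps output above):

docker exec -it nginx-nodejs-redis_redis_1 redis-cli monitor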

    Further References

    - + \ No newline at end of file diff --git a/operate/index.html b/operate/index.html index f4b02218be..24c65fa716 100644 --- a/operate/index.html +++ b/operate/index.html @@ -4,7 +4,7 @@ Operate Your Redis Database | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Operate Your Redis Database

The following links demonstrate various ways to provision Redis and accelerate app deployment using DevOps.

    docusaurus mascot

    Explore by Category

    Embed Redis into your CI/CD Process
    Observe key indicators critical to operating Redis
    Automate delivery of Redis to your org
    Create a performant, stable, and secure deployment of Redis
    Connect your containerized workloads to Redis
    - + \ No newline at end of file diff --git a/operate/observability/datadog/index.html b/operate/observability/datadog/index.html index eff7c97395..2a6ed785c2 100644 --- a/operate/observability/datadog/index.html +++ b/operate/observability/datadog/index.html @@ -4,7 +4,7 @@ Redis Enterprise Observability with Datadog | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Redis Enterprise Observability with Datadog


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis
    Profile picture for Christian Mague
    Author:
    Christian Mague, Former Principal Field Engineer at Redis

    Datadog

DevOps and SRE practitioners are already keenly aware of the importance of system reliability, as it’s one of the shared goals in every high-performing organization. Defining clear reliability targets based on solid data is crucial for productive collaboration between developers and SREs. This need spans the entire infrastructure from application to backend database services.

    Service Level Objectives (SLOs) provide a powerful interface for all teams to set clear performance and reliability goals based on Service Level Indicators (SLIs) or data points. A good model is to think of the SLIs as the data and the SLO as the information one uses to make critical decisions.

    Further Read: https://cloud.google.com/blog/products/devops-sre/sre-fundamentals-slis-slas-and-slos

    Redis

    Redis is a popular multi-model NoSQL database server that provides in-memory data access speeds for search, messaging, streaming, caching, and graph—amongst other capabilities. Highly performant sites such as Twitter, Snapchat, Freshworks, GitHub, Docker, Pinterest, and Stack Overflow all look to Redis to move data in real time.

    Redis SLOs can be broken down into three main categories:

| Category | Definition | Example SLO | Example SLI |
| --- | --- | --- | --- |
| Throughput | Number of operations being pushed through the service in a given time period | System should be capable of performing 200M operations per second | redisenterprise.total_req |
| Latency | Elapsed time it takes for an operation | Average write latency should not exceed 1 millisecond | redisenterprise.avg_latency |
| Capacity | Memory/storage/network limits of the underlying data source | Database should have 20% memory overhead available to handle bursts | redisenterprise.used_memory_percent |

    Why Datadog?

    Running your own performance data platform is time consuming and difficult. Datadog provides an excellent platform with an open source agent to collect metrics and allows them to be displayed easily and alerted upon when necessary.

    Datadog allows you to:

    • Collect metrics from various infrastructure components out of the box
    • Display that data in easy to read dashboards
    • Monitor performance metrics and alert accordingly
    • Correlate log entries with metrics to quickly drill down to root causes

    Key Performance Indicators

    1. Latency

    Definition

    redisenterprise.avg_latency (unit: microseconds)

    This is the average amount of time that a request takes to return from the time that it first hits the Redis Enterprise proxy until the response is returned. It does not include the full time from the remote client’s perspective.

    Characteristics

Since Redis is popular due to performance, generally you would expect most operations to return in single-digit milliseconds. Tune any alerts to match your SLA. It’s generally recommended that you also measure Redis operation latency at the client side, to make it easier to determine whether a server slowdown or an increase in network latency is the culprit in any performance issues.

Possible Causes

| Cause | Factors |
| --- | --- |
| Spike in requests | Check both the Network Traffic and Operations Per Second metrics to determine if there is a corresponding increase |
| Slow-running queries | Check the slow log in the Redis Enterprise UI for the database |
| Insufficient compute resources | Check to see if the CPU Usage, Memory Usage Percentage, or Evictions are increasing |

Remediation

| Action | Method |
| --- | --- |
| Increase resources | The database can be scaled up online by going to the Web UI and enabling clustering on the database. In extreme cases, more nodes can be added to the cluster and resources rebalanced |
| Inefficient queries | Redis allows you to view a slow log with a tunable threshold. It can be viewed either in the Redis Enterprise UI or by running: redis-cli -h HOST -p PORT -a PASSWORD SLOWLOG GET 100 |

    2. Memory Usage Percentage

    Definition

    redisenterprise.memory_usage_percent (unit: percentage)

    This is the percentage of used memory over the memory limit set for the database.

    Characteristics

    In Redis Enterprise, all databases have a maximum memory limit set to ensure isolation in a multi-tenant environment. This is also highly recommended when running open source Redis. Be aware that Redis does not immediately free memory upon key deletion. Depending on the size of the database, generally between 80-95% is a safe threshold.

Possible Causes

| Cause | Factors |
| --- | --- |
| Possible spike in activity | Check both the Network Traffic and Operations Per Second metrics to determine if there is a corresponding increase |
| Database sized incorrectly | View the Memory Usage raw bytes over time to see if a usage pattern has changed |
| Incorrect retention policies | Check to see if keys are being Evicted or Expired |

Remediation

| Action | Method |
| --- | --- |
| Increase resources | The database memory limit can be raised online with no downtime through either the Redis Enterprise UI or the API |
| Retention Policy | In a caching use case, setting a TTL for unused data to expire is often helpful. In addition, Eviction policies can be set; however, these may often not be able to keep up in extremely high throughput environments with very tight resource constraints |

    3. Cache Hit Rate

    Definition
    redisenterprise.cache_hit_rate (unit: percent)

    This is the percentage of time that Redis is accessing a key that already exists.

    Characteristics

    This metric is useful only in the caching use case and should be ignored for all other use cases. There are tradeoffs between the freshness of the data in the cache and efficacy of the cache mitigating traffic to any backend data service. These tradeoffs should be considered carefully when determining the threshold for alerting.

    Possible Causes

    This is highly specific to the application caching with no general rules that are applicable in the majority of cases.

    Remediation

    Note that Redis commands return information on whether or not a key or field already exists. For example, the HSET command returns the number of fields in the hash that were added.
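For example, in an illustrative redis-cli session:

127.0.0.1:6379> HSET user:1 name "Ada"
(integer) 1     <- the field was newly added
127.0.0.1:6379> HSET user:1 name "Ada Lovelace"
(integer) 0     <- the field already existed, so nothing new was added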

    4. Evictions

    Definition
    redisenterprise.evicted_objects (unit: count)

    This is the count of items that have been evicted from the database.

    Characteristics

    Eviction occurs when the database is close to capacity. In this condition, the eviction policy starts to take effect. While Expiration is fairly common in the caching use case, Eviction from the cache should generally be a matter of concern. At very high throughput and very restricted resource use cases, sometimes the eviction sweeps cannot keep up with memory pressure. Relying on Eviction as a memory management technique should be considered carefully.

    Possible Causes

    See Memory Usage Percentage Possible Causes

    Remediation

    See Memory Usage Percentage Remediation

    Secondary Indicators

    1. Network Traffic

    Definition
    redisenterprise.ingress_bytes/redisenterprise.egress_bytes (unit: bytes)

    Counters for the network traffic coming into the database and out from the database.

    Characteristics

    While these two metrics will not help you pinpoint a root cause, network traffic is an excellent leading indicator of trouble. Changes in network traffic patterns indicate corresponding changes in database behavior and further investigation is usually warranted.

    2. Connection Count

    Definition
    redisenterprise.conns (unit: count)

    The number of current client connections to the database.

    Characteristics

    This metric should be monitored with both a minimum and maximum number of connections. The minimum number of connections not being met is an excellent indicator of either networking or application configuration errors. The maximum number of connections being exceeded may indicate a need to tune the database.

Possible Causes

| Cause | Factors |
| --- | --- |
| Minimum clients not met | Incorrect client configuration, network firewall, or network issues |
| Maximum connections exceeded | Client library is not releasing connections or an increase in the number of clients |

Remediation

| Action | Method |
| --- | --- |
| Clients misconfigured | Confirm client configurations |
| Networking issue | Issue the PING command from a client node, or TELNET to the endpoint |
| Too many connections | Be sure that you are using pooling on your client library and that your pools are sized accordingly |
| Too many connections | Using rladmin, run: tune proxy PROXY_NUMBER threads VALUE |

    You can access the complete list of metrics here.

    Getting Started

    Follow the steps below to set up the Datadog agent to monitor your Redis Enterprise cluster, as well as database metrics:

    Quickstart Guide:

    Prerequisites:

    • Follow this link to setup your Redis Enterprise cluster and database
• Set up a read-only user account by logging into your Redis Enterprise instance and visiting the “Access Control” section

    alt_text

    • Add a new user account with Cluster View Permissions.

    alt_text

    Step 1. Set Up a Datadog Agent

    Before we jump into the installation, let’s look at the various modes that you can run the Datadog agent in:

    • External Monitor Mode
    • Localhost Mode

    External Monitor Mode

    alt_text

    In external monitor mode, a Datadog agent running outside of the cluster can monitor multiple Redis Enterprise clusters, as shown in the diagram above.

    Localhost Mode

    Using localhost mode, the integration can be installed on every node of a Redis Enterprise cluster. This allows the user to correlate OS level metrics with Redis-specific metrics for faster root cause analysis. Only the Redis Enterprise cluster leader will submit metrics and events to Datadog. In the event of a migration of the cluster leader, the new cluster leader will begin to submit data to Datadog.

    alt_text

    For this demo, we will be leveraging localhost mode as we just have two nodes to configure.

    Step 2. Launch the Datadog agent on the Master node

Pick your preferred OS distribution and install the Datadog agent.

    alt_text

    Run the following command to install the integration wheel with the Agent. Replace the integration version with 1.0.1.

     datadog-agent integration install -t datadog-redisenterprise==<INTEGRATION_VERSION>

    Step 3. Configuring Datadog configuration file

    Copy the sample configuration and update the required sections to collect data from your Redis Enterprise cluster:

    For Localhost Mode

    The following minimal configuration should be added to the Enterprise Master node.

sudo vim /etc/datadog-agent/conf.d/redisenterprise.d/conf.yaml

#################################################################
# Base configuration
init_config:

instances:
  - host: localhost
    username: user@example.com
    password: secretPassword
    port: 9443

Similarly, you need to edit the configuration file on the Enterprise Follower node and add the following:

sudo vim /etc/datadog-agent/conf.d/redisenterprise.d/conf.yaml

#################################################################
# Base configuration
init_config:

instances:
  - host: localhost
    username: user@example.com
    password: secretPassword
    port: 9443

    For External Monitor Mode

    The following configuration should be added to the Monitor node

# Base configuration
init_config:

instances:
  - host: cluster1.fqdn
    username: user@example.com
    password: secretPassword
    port: 9443
  - host: cluster2.fqdn
    username: user@example.com
    password: secretPassword
    port: 9443

    Step 4. Restart the Datadog Agent service

     sudo service datadog-agent restart

    Step 5. Viewing the Datadog Dashboard UI

    Find the Redis Enterprise Integration under the Integration Menu:

    alt_text

    Displaying the host reporting data to Datadog:

    alt_text

    Listing the Redis Enterprise dashboards:

    alt_text

    Host details under Datadog Infrastructure list:

    alt_text

    Datadog dashboard displaying host metrics of the 1st host (CPU, Memory Usage, Load Average etc):

    alt_text

    Datadog dashboard displaying host metrics of the 2nd host:

    alt_text

    Step 6. Verifying the Datadog Agent Status

    Running the datadog-agent command shows that the Redis Enterprise integration is working correctly.

     sudo datadog-agent status
     redisenterprise (1.0.1)
    -----------------------
    Instance ID: redisenterprise:ef4cd60aadac5744 [OK]
    Configuration Source: file:/etc/datadog-agent/conf.d/redisenterprise.d/conf.yaml
    Total Runs: 2
    Metric Samples: Last Run: 0, Total: 0
    Events: Last Run: 0, Total: 0
    Service Checks: Last Run: 0, Total: 0
    Average Execution Time : 46ms
    Last Execution Date : 2021-10-28 17:27:10 UTC (1635442030000)
    Last Successful Execution Date : 2021-10-28 17:27:10 UTC (1635442030000)

    Redis Enterprise Cluster Top View

    alt_text

Let’s run a benchmark tool called memtier_benchmark to simulate an arbitrary number of clients connecting at the same time and performing actions on the server, measuring how long it takes for the requests to be completed.

     memtier_benchmark --server localhost -p 19701 -a password
    [RUN #1] Preparing benchmark client...
    [RUN #1] Launching threads now...

    alt_text

The next command instructs memtier_benchmark to connect to your Redis Enterprise database and generate load with the following characteristics:

    • Write objects only, no reads.
    • Each object is 500 bytes.
    • Each object has random data in the value.
    • Each key has a random pattern, then a colon, followed by a random pattern.

    Run this command until it fills up your database to where you want it for testing. The easiest way to check is on the database metrics page.

     memtier_benchmark --server localhost -p 19701 -a Oracle9ias12# -R -n allkeys -d 500 --key-pattern=P:P --ratio=1:0
    setting requests to 50001
    [RUN #1] Preparing benchmark client...
    [RUN #1] Launching threads now...

    alt_text

    The Datadog Events Stream shows an instant view of your infrastructure and services events to help you troubleshoot issues happening now or in the past. The event stream displays the most recent events generated by your infrastructure and the associated monitors, as shown in the diagram below.

    alt_text

    References:

    - + \ No newline at end of file diff --git a/operate/observability/index.html b/operate/observability/index.html index cb8cea0932..5d05202fb0 100644 --- a/operate/observability/index.html +++ b/operate/observability/index.html @@ -4,7 +4,7 @@ Observability | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Observability

    The following links demonstrate different ways in which you can observe key indicators critical to operating Redis.

    An out-of-the-box predefined build Grafana dashboard for Redis
    How to create Grafana Dashboards for Redis Enterprise cluster in 5 Minutes
    Redis Enterprise Observability with Datadog
    How to monitor Redis with Prometheus and Grafana for Real-Time Analytics
    - + \ No newline at end of file diff --git a/operate/observability/prometheus/index.html b/operate/observability/prometheus/index.html index 22f91c3062..5319112da9 100644 --- a/operate/observability/prometheus/index.html +++ b/operate/observability/prometheus/index.html @@ -4,7 +4,7 @@ How to monitor Redis with Prometheus and Grafana for Real-Time Analytics | The Home of Redis Developers - + @@ -13,7 +13,7 @@

    How to monitor Redis with Prometheus and Grafana for Real-Time Analytics


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    My Image

    Time-series data is basically a series of data stored in time order and produced continuously over a long period of time. These measurements and events are tracked, monitored, downsampled, and aggregated over time. The events could be, for example, IoT sensor data. Every sensor is a source of time-series data. Each data point in the series stores the source information and other sensor measurements as labels. Data labels from every source may not conform to the same structure or order.

    A time-series database is a database system designed to store and retrieve such data for each point in time. Timestamped data can include data generated at regular intervals as well as data generated at unpredictable intervals.

    When do you use a time-series database?

    • When your application needs data that accumulates quickly and your other databases aren’t designed to handle that scale.
    • For financial or industrial applications.
    • When your application needs to perform real-time analysis of billions of records.
    • When your application needs to perform online queries at millisecond timescales, and support CPU-efficient ad-hoc queries.

    Challenges with the existing traditional databases

You might find numerous solutions that still store time-series data in a relational database, but they’re quite inefficient and come with their own set of drawbacks.

A typical time-series database is usually built to only manage time-series data, so one of the challenges it faces is with use cases that involve some sort of computation on top of time-series data. One good example could be capturing a live video feed in a time-series database: if you want to run an AI model for face recognition, you would have to extract the time-series data, apply some sort of data transformation, and then do the computation.

Relational databases carry the overhead of locking and synchronization that aren’t required for immutable time-series data. This results in slower-than-required performance for both ingest and queries. When scaling out, it also means investing in additional compute resources. These databases enforce a rigid structure for labels and can’t accommodate unstructured data. They also require scheduled jobs for cleaning up old data. Beyond the time-series use case, these databases are often used for other workloads as well, which means heavy time-series queries may affect those other workloads.

    What is Redis Stack ?

    Redis Stack extends the core capabilities of Redis OSS and provides a complete developer experience for debugging and more. In addition to all of the features of Redis OSS, Redis Stack supports:

• Queryable JSON documents
• Querying across hashes and JSON documents, including full-text search
• Time series data support (ingestion & querying)
• Probabilistic data structures

    What is the Time Series data model in Redis Stack?

Redis Stack supports a time-series data model that addresses the needs of handling time-series data. It removes the limitations enforced by relational databases and enables you to collect, manage, and deliver time-series data at scale. As an in-memory database, Redis can ingest over 500,000 records per second on a standard node. Our benchmarks show that you can ingest over 11.5 million records per second with a cluster of 16 Redis shards.

Time series support in Redis Stack is resource-efficient. With Redis, you can add rules to compact data by downsampling. For example, if you’ve collected more than one billion data points in a day, you could aggregate the data by every minute in order to downsample it, thereby reducing the dataset size to 1,440 data points (24 * 60 = 1,440). You can also set data retention policies and expire the data by time when you no longer need it. Redis allows you to aggregate data by average, minimum, maximum, sum, count, range, first, and last. You can run over 100,000 aggregation queries per second with sub-millisecond latency, and you can also perform reverse lookups on the labels in a specific time range.

• High volume inserts, low latency reads
• Query by start time and end time
• Aggregated queries (Min, Max, Avg, Sum, Range, Count, First, Last, STD.P, STD.S, Var.P, Var.S) for any time bucket
• Configurable maximum retention period
• Downsampling/compaction - automatically updated aggregate time series (see the sketch after this list)
• Secondary indexing - each time series has labels (field-value pairs), which allows you to query by labels
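As a sketch of the downsampling rules mentioned above (the key names are hypothetical), you could compact raw readings into one-minute averages like this:

TS.CREATE sensor:raw RETENTION 86400000
TS.CREATE sensor:avg1m
TS.CREATERULE sensor:raw sensor:avg1m AGGREGATION avg 60000

Here RETENTION 86400000 keeps raw samples for 24 hours, and 60000 is the size of each aggregation bucket in milliseconds.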

    Why Prometheus?

    Prometheus is an open-source systems monitoring and alerting toolkit. It collects and stores its metrics as time series data, i.e. metrics information. The metrics are numeric measurements in a time series, meaning changes recorded over time. These metrics are stored with the timestamp at which it was recorded, alongside optional key-value pairs called labels. Metrics play an important role in understanding why your application is working in a certain way.

    Prometheus remote storage adapter for Time Series data model of Redis Stack

In the Time Series Database over Redis organization on GitHub, you can find projects that help you integrate the Redis Stack time series data model with other tools, including Prometheus and Grafana. The Prometheus remote storage adapter for Redis is hosted on GitHub. It’s basically a read/write adapter that uses Redis Stack as a backend database: the adapter receives Prometheus metrics via remote write and writes them to Redis.
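On the Prometheus side, the integration boils down to a remote_write (and optionally remote_read) section in prometheus.yml. A minimal sketch, assuming the adapter is reachable at adapter:9201 and uses the conventional /write and /read endpoint paths:

remote_write:
  - url: "http://adapter:9201/write"
remote_read:
  - url: "http://adapter:9201/read"

The compose file in the next section wires up exactly such an adapter listening on 0.0.0.0:9201.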

    Getting Started

    Prerequisites:

    • Install Git
    • Install Docker
    • Install Docker Compose

    Step 1. Clone the repository

     git clone https://github.com/RedisTimeSeries/prometheus-redistimeseries-adapter

    Step 2. Examining the Docker Compose File

This Docker Compose file defines four services:

    1. Prometheus
    2. Adapter
    3. Grafana
    4. Redis
 version: '3'
 services:
   prometheus:
     image: "prom/prometheus:v2.8.0"
     command: ["--config.file=/prometheus.yml"]
     volumes:
       - ./prometheus.yaml:/prometheus.yml
     ports:
       - 9090:9090
   adapter:
     image: "redislabs/prometheus-redistimeseries-adapter:master"
     command: ["-redis-address", "redis:6379", "-web.listen-address", "0.0.0.0:9201"]
   redis:
     image: "redislabs/redistimeseries:edge"
     ports:
       - "6379:6379"
   grafana:
     build: ./grafana/
     ports:
       - "3000:3000"

    Prometheus

The prometheus service uses the prom/prometheus image pulled from Docker Hub. It binds the exposed port 9090 between the container and the host machine, and the Prometheus configuration file is made available to the container by mounting it from the host.

    Storage Adapter

The adapter service uses the redislabs/prometheus-redistimeseries-adapter:master image pulled from Docker Hub. Its default command points the adapter at redis:6379 and makes it listen on 0.0.0.0:9201.

    Redis

The redis service uses the redislabs/redistimeseries:edge image pulled from Docker Hub. It binds the exposed port 6379 between the container and the host machine.

    Grafana

The grafana service uses an image built from the Dockerfile in the ./grafana/ directory. It binds the exposed port 3000 between the container and the host machine.
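
A minimal sketch of what such a Dockerfile might contain (the actual file in the repository may differ) simply bakes the Redis plugin installation into the stock Grafana image:

 FROM grafana/grafana
 # The stock image's startup script installs any plugins listed here
 ENV GF_INSTALL_PLUGINS=redis-datasource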

    Step 3. Run the Docker Compose

Change directory to compose and execute the following commands:

     docker-compose up -d
     docker-compose ps
    NAME COMMAND SERVICE STATUS PORTS
    compose-adapter-1 "/adapter/redis-ts-a…" adapter running
    compose-grafana-1 "/run.sh" grafana running 0.0.0.0:3000->3000/tcp
    compose-prometheus-1 "/bin/prometheus --c…" prometheus running 0.0.0.0:9090->9090/tcp
    compose-redis-1 "docker-entrypoint.s…" redis running 0.0.0.0:6379->6379/tcp

    Step 4. Accessing Grafana

Open http://hostIP:3000 to access the Grafana dashboard. The default username and password are admin/admin.

    Step 5. Add Prometheus Data Source

In the left sidebar, you will see the “Configuration” option. Select “Data Sources” and choose Prometheus.

    Adding the Prometheus data source

    Click “Save and Test”.

Step 6. Importing the Prometheus Dashboards

    Click on “Import” for all the Prometheus dashboards.

    Importing the Prometheus data source

    Step 7. Adding Redis Data Source

    Again, click on “Data Sources” and add Redis.

    Adding the Redis data source

    Click "Import".

    Importing the Redis data source

    Step 8. Running the Sensor Script

    It’s time to test drive a few demo scripts built by the Redis team. To start with, clone the following repository:

     git clone https://github.com/RedisTimeSeries/prometheus-demos

This repo contains a set of basic demos showcasing the integration of the Redis Stack time series data model with Prometheus and Grafana. Let’s pick a sensor script.

     python3 weather_station/sensors.py

    This script will add random measurements for temperature and humidity for a number of sensors.

    Go to “Add Panel” on the top right corner of the Grafana dashboard and start adding temperature and humidity values.


    Step 9. Accessing Prometheus Dashboard

Open http://HOSTIP:9090 to access the Prometheus dashboard and view the sensor values without any further configuration.

    Accessing the Prometheus dashboard

    Further References:

    Redis Launchpad
How to add Redis as a data source in Grafana and build customized dashboards for Analytics

In our case, we will be using redis-datasource.

     docker run -d -p 3000:3000 --name=grafana -e "GF_INSTALL_PLUGINS=redis-datasource" grafana/grafana

    Step 3. Accessing the Grafana dashboard

Open http://IP:3000 to access Grafana. The default username/password is admin/admin.


    Step 4. Click "Configuration"


    Step 5. Add Redis as a Data Source


    Step 6. Select "Redis" as data source type


    Step 7. Add Redis Database name, Endpoint URL and password

We'll assume that you already have a Redis server up and running in your infrastructure. You can also leverage Redis Enterprise Cloud as demonstrated below.


    Step 8. Click "Import" under Dashboard


Step 9. Access the Redis Data Source Dashboard


    Supported commands

The data source supports various Redis commands through custom components and provides a unified interface for querying any command.

    Query
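
For example (a hedged sketch; the exact set of supported commands depends on the plugin version), queries are written in the query editor as ordinary Redis commands:

 INFO
 SLOWLOG GET
 TS.RANGE sensor:1:temp - +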

    Further References


    How to create Grafana Dashboards for Redis Enterprise cluster in 5 Minutes


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

A Redis Enterprise cluster is a set of nodes, typically two or more, providing database services. Clusters are inherently multi-tenant, and a single cluster can manage multiple databases, each accessed through an individual endpoint. Redis Enterprise software provides a REST API to retrieve information about the cluster, databases, nodes, and metrics.
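
As a quick illustration (host and credentials are placeholders), basic cluster information can be fetched from that REST API with curl:

 # Redis Enterprise exposes its REST API on port 9443 (HTTPS, basic auth)
 curl -k -u "admin@example.com:password" https://cluster.example.com:9443/v1/cluster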

The Redis Explorer plugin is a recent addition to the Grafana plugin catalog that adds support for Redis Enterprise software. It is a plugin for Grafana that connects to Redis Enterprise clusters using the REST API. It provides application pages to add Redis data sources for managed databases, and dashboards to view cluster configuration.

    The Redis Explorer plugin

Redis Explorer allows you to create the following dashboards in Grafana:

    Enterprise Clusters Dashboard

    The Enterprise Clusters dashboard provides basic information about the cluster, license, and displays most important metrics.

    Enterprise Cluster Dashboard

    Cluster Overview Dashboard

    The Cluster Overview dashboard provides the most important information and metrics for the selected cluster.

    The Cluster Overview Dashboard

    Cluster Nodes Dashboard

The Cluster Nodes dashboard provides information and metrics for each node participating in the cluster.

    The Cluster Nodes dashboard

    Cluster Databases Dashboard

The Cluster Databases dashboard provides information and metrics for each database managed by the cluster.

    The Cluster databases dashboard

    Getting Started

    Pre-requisites

    • Grafana 8.0+ is required for Redis Explorer 2.X.
    • Grafana 7.1+ is required for Redis Explorer 1.X.
    • Docker
    • Redis Enterprise Cluster

    Step 1. Setup Redis Enterprise Cluster

Follow these steps to set up Redis Enterprise cluster nodes.

    Set up Redis Enterprise

    Redis Enterprise Cluster

    Step 2. Install Grafana

     brew install grafana

    Step 3. Install redis-explorer-app

Use the grafana-cli tool to install the plugin from the command line; the Redis Application plugin and Redis Data Source will be installed automatically as dependencies.

     grafana-cli plugins install redis-explorer-app

    Step 4. Using Docker

    You can even run Redis Explorer plugin using Docker:

     docker run -p 3000:3000 --name=grafana -e "GF_INSTALL_PLUGINS=redis-explorer-app" grafana/grafana

Open http://IP:3000 to access Grafana. The default username/password is admin/admin.

    Step 5. Log in to Grafana

    Logging into Grafana

    Step 6. Choose Redis Explorer in the sidebar

Once you add the data source, you should be able to choose the right option:

    Explorer Options

    Step 7. Getting the Redis Enterprise Cluster Overview

    Redis Enterprise Cluster Overview

    Step 8. Displaying the Redis Enterprise Cluster Nodes

    Redis Enterprise Cluster Nodes

    Further References

    Redis Launchpad
    - + \ No newline at end of file diff --git a/operate/orchestration/docker/images/index.html b/operate/orchestration/docker/images/index.html index e7e347e89e..83a8e32e3c 100644 --- a/operate/orchestration/docker/images/index.html +++ b/operate/orchestration/docker/images/index.html @@ -4,7 +4,7 @@ List of Images | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    How to Deploy and Run Redis in a Docker container


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    Pre-requisites

    Ensure that Docker is installed in your system.

    If you're new, refer to Docker's installation guide to install Docker on Mac.

    To pull and start the Redis Enterprise Software Docker container, run this docker run command in the terminal or command-line for your operating system.

    note

    On Windows, make sure Docker is configured to run Linux-based containers.

    docker run -d --cap-add sys_resource --name rp -p 8443:8443 -p 9443:9443 -p 12000:12000 redislabs/redis

    In the web browser on the host machine, go to https://localhost:8443 to see the Redis Enterprise Software web console.

    Step 1: Click on “Setup”

    Click Setup to start the node configuration steps.

    Setting up nodes

    Step 2: Enter your preferred FQDN

    In the Node Configuration settings, enter a cluster FQDN such as demo.redis.com. Then click the Next button.

    Redis Enterprise Setup

    Enter your license key, if you have one. If not, click the Next button to use the trial version.

    Step 3: Enter the admin credentials

    Enter an email and password for the admin account for the web console.

    Login credentials

    These credentials are also used for connections to the REST API. Click OK to confirm that you are aware of the replacement of the HTTPS SSL/TLS certificate on the node, and proceed through the browser warning.

Step 4: Create a Database

    Select “Redis database” and the “single region” deployment, and click Next.

    Creating a database

Enter a database name such as demodb and click Activate to create your database.

    Creating a database

    You now have a Redis database!

    Step 5: Connecting using redis-cli

After you create the Redis database, you are ready to store data in it. redis-cli is a simple built-in command-line tool for interacting with Redis databases. Run redis-cli, located in the /opt/redislabs/bin directory, to connect to port 12000 and store and retrieve the value of a key in the database:

$ docker exec -it rp bash
redislabs@fd8dca50f905:/opt$ /opt/redislabs/bin/redis-cli -p 12000
    127.0.0.1:12000> auth <enter password>
    OK
    127.0.0.1:12000> set key1 123
    OK
    127.0.0.1:12000> get key1
    "123"

    Next Steps

    Redis Launchpad

    Kubernetes Operator: What It Is and Why You Should Really Care About It


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis


    Kubernetes is popular due to its capability to deploy new apps at a faster pace. Thanks to "Infrastructure as data" (specifically, YAML), today you can express all your Kubernetes resources such as Pods, Deployments, Services, Volumes, etc., in a YAML file. These default objects make it much easier for DevOps and SRE engineers to fully express their workloads without the need to learn how to write code in a programming language like Python, Java, or Ruby.

    Kubernetes is designed for automation. Out of the box, you get lots of built-in automation from the core of Kubernetes. It can speed up your development process by making easy, automated deployments, updates (rolling update), and by managing your apps and services with almost zero downtime. However, Kubernetes can’t automate the process natively for stateful applications. For example, say you have a stateful workload, such as a database application, running on several nodes. If a majority of nodes go down, you’ll need to reload the database from a specific snapshot following specific steps. Using existing default objects, types, and controllers in Kubernetes, this would be impossible to achieve.

    Think of scaling nodes up, or upgrading to a new version, or disaster recovery for your stateful application — these kinds of operations often need very specific steps, and typically require manual intervention. Kubernetes cannot know all about every stateful, complex, clustered application. Kubernetes, on its own, does not know the configuration values for, say, a Redis database cluster, with its arranged memberships and stateful, persistent storage. Additionally, scaling stateful applications in Kubernetes is not an easy task and requires manual intervention.

    Stateful vs Stateless Applications

    Let’s try to understand the difference between stateful versus stateless applications with a simple example. Consider a Kubernetes cluster running a simple web application (without any operator). The YAML file below allows you to create two replicas of NGINX (a stateless application).

 apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: nginx-deployment
   namespace: web
 spec:
   selector:
     matchLabels:
       app: nginx
   replicas: 2
   template:
     metadata:
       labels:
         app: nginx
     spec:
       containers:
         - name: nginx
           image: nginx:1.14.2
           ports:
             - containerPort: 80

In the example above, a Deployment object named nginx-deployment (set by the .metadata.name field) is created in the web namespace. It creates two replicated Pods, per the .spec.replicas field. The .spec.selector field defines how the Deployment finds the Pods it manages; in this case, it selects the label defined in the Pod template (app: nginx). The template field contains the following subfields: the Pods are labeled app: nginx via the .metadata.labels field, and the Pod template’s specification states that the Pods run one container, nginx, which runs the nginx Docker Hub image at version 1.14.2.

    Run the command below to create the Deployment resource:

    kubectl create -f nginx-dep.yaml

    Let us verify if the Deployment was created successfully by running the following command:

     kubectl get deployments
    NAME READY UP-TO-DATE AVAILABLE AGE
    nginx-deployment 2/2 2 2 63s

The output above shows the name of the Deployment in the namespace, how many replicas of the application are available to your users, and that the number of replicas updated to achieve the desired state is 2.


    You can run the kubectl describe command to get detailed information of deployment resources. To show details of a specific resource or group of resources:

     kubectl describe deploy
    Name: nginx-deployment
    Namespace: default
    CreationTimestamp: Mon, 30 Dec 2019 07:10:33 +0000
    Labels: <none>
    Annotations: deployment.kubernetes.io/revision: 1
    Selector: app=nginx
    Replicas: 2 desired | 2 updated | 2 total | 0 available | 2 unavailable
    StrategyType: RollingUpdate
    MinReadySeconds: 0
    RollingUpdateStrategy: 25% max unavailable, 25% max surge
    Pod Template:
    Labels: app=nginx
    Containers:
    nginx:
    Image: nginx:1.7.9
    Port: 80/TCP
    Host Port: 0/TCP
    Environment: <none>
    Mounts: <none>
    Volumes: <none>
    Conditions:
    Type Status Reason
    ---- ------ ------
    Available False MinimumReplicasUnavailable
    Progressing True ReplicaSetUpdated
    OldReplicaSets: <none>
    NewReplicaSet: nginx-deployment-6dd86d77d (2/2 replicas created)
    Events:
    Type Reason Age From Message
    ---- ------ ---- ---- -------
    Normal ScalingReplicaSet 90s deployment-controller Scaled up replica set nginx-deployment-6dd86d77d to 2

    A Deployment is responsible for keeping a set of Pods running, but it’s equally important to expose an interface to these Pods so that the other external processes can access them. That’s where the Service resource comes in. The Service resource lets you expose an application running in Pods to be reachable from outside your cluster. Let us create a Service resource definition as shown below:

apiVersion: v1
kind: Service
metadata:
  name: nginx-service
spec:
  selector:
    app: nginx
  ports:
    - port: 80
      targetPort: 80
  type: LoadBalancer

    The above YAML specification creates a new Service object named "nginx-service," which targets TCP port 80 on any Pod with the app=nginx label.

     kubectl get svc -n web
    NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
    nginx-service LoadBalancer 10.107.174.108 localhost 80:31596/TCP 46s


    Let’s scale the Deployment to 4 replicas. We are going to use the kubectl scale command, followed by the deployment type, name, and desired number of instances. The output is similar to this:

    kubectl scale deployments/nginx-deployment --replicas=4
    deployment.extensions/nginx-deployment scaled

The change was applied, and we now have 4 instances of the application available. Next, let’s check whether the number of Pods changed. There should now be 4 Pods running in the cluster:


     kubectl get deployments
    NAME READY UP-TO-DATE AVAILABLE AGE
    nginx-deployment 4/4 4 4 4m

    There are 4 Pods, with different IP addresses. The change was registered in the Deployment events log.

     kubectl get pods -o wide
    NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
    nginx-deployment-6dd86d77d-b4v7k 1/1 Running 0 4m32s 10.1.0.237 docker-desktop none none
    nginx-deployment-6dd86d77d-bnc5m 1/1 Running 0 4m32s 10.1.0.236 docker-desktop none none
    nginx-deployment-6dd86d77d-bs6jr 1/1 Running 0 86s 10.1.0.239 docker-desktop none none
    nginx-deployment-6dd86d77d-wbdzv 1/1 Running 0 86s 10.1.0.238 docker-desktop none none

    Deleting one of the web server Pods triggers work in the control plane to restore the desired state of four replicas. Kubernetes starts a new Pod to replace the deleted one. In this excerpt, the replacement Pod shows a STATUS of ContainerCreating:

     kubectl delete pod nginx-deployment-6dd86d77d-b4v7k

    You will notice that the Nginx static web server is interchangeable with any other replica, or with a new Pod that replaces one of the replicas. It doesn’t store data or maintain state in any way. Kubernetes doesn’t need to make any special arrangements to replace a failed Pod, or to scale the application by adding or removing replicas of the server. Now you might be thinking, what if you want to store the state of the application? Great question.

Scaling stateful applications is hard

Scaling stateless applications in Kubernetes is easy, but that’s not the case for stateful applications. Stateful applications require manual intervention: bringing Pods up and down is not that simple, because each Pod has an identity and data attached to it. Removing a Pod means losing its data and disrupting the system.


Consider a Kubernetes cluster with six worker Nodes hosting an Nginx web application connected to a persistent volume. Here is a snippet of the StatefulSet YAML file:


apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  serviceName: "nginx"
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:1.14.2
          ports:
            - containerPort: 80
              name: web
          volumeMounts:
            - name: www
              mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
    - metadata:
        name: www
      spec:
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: 1Gi

    Kubernetes makes physical storage devices available to your cluster in the form of objects called Persistent Volumes. Each of these Persistent Volumes is consumed by a Kubernetes Pod by issuing a PersistentVolumeClaim object, also known as PVC. A PVC object lets Pods use storage from Persistent Volumes. Imagine a scenario in which we want to downscale a cluster from 5 Nodes to 3 Nodes. Suddenly removing 2 Nodes at once is a potentially destructive operation. This might lead to the loss of all copies of the data. A better way to handle Node removal would be to first migrate data from the Node to be removed to other Nodes in the system before performing the actual Pod deletion. It is important to note that the StatefulSet controller is necessarily generic and cannot possibly know about every possible way to manage data migration and replication. In practice, however, StatefulSets are rarely enough to handle complex, distributed stateful workload systems in production environments.

    Now the question is, how to solve this problem? Enter Operators. Operators were developed to handle the sophisticated, stateful applications that the default Kubernetes controllers aren’t able to handle. While Kubernetes controllers like StatefulSets are ideal for deploying, maintaining, and scaling simple stateless applications, they are not equipped to handle access to stateful resources, or to upgrade, resize, and backup of more elaborate clustered applications such as databases. A Kubernetes Operator fills in the gaps between the capabilities and automation provided by Kubernetes and how your software uses Kubernetes for automation of tasks relevant to your software.

    An Operator is basically an application-specific controller that can help you manage a Kubernetes application. It is a way to package, run, and maintain a Kubernetes application. It is designed to extend the capabilities of Kubernetes, and also simplify application management. This is especially useful for stateful applications, which include persistent storage and other elements external to the application, and may require extra work to manage and maintain.

    Functions of Kubernetes Operator

A Kubernetes Operator uses the Kubernetes API to create, configure, and manage instances of complex stateful applications on behalf of a Kubernetes user. OperatorHub.io is a public registry designed for finding Kubernetes Operator-backed services. With OperatorHub, developers can easily create an application based on an operator without going through the complexity of crafting one from scratch.


    Below are a few examples of popular Kubernetes Operators and their functions and capabilities.

    Kubernetes Operators:

    • Helps you deploy an application on demand (for example, the Argo CD operator; Argo CD is a declarative, GitOps continuous delivery tool for Kubernetes that helps with easy installation and configuration on demand)
    • Helps you install applications with the required configurations and number of application instances
    • Allows you to take and restore backups of the application state (for example, Velero operator manages disaster recovery, backup, and restoration of cluster components such as pv, pvc, deployments, etc., to aid in disaster recovery)
    • Handles the upgrades of the application code plus the changes, such as database schema (for example, Flux is a continuous delivery solution for Kubernetes that allows automating updates to configuration when there is new code to deploy)
    • Can manage a cluster of database servers (for example, MariaDB operator creates MariaDB server and database easily by defining simple custom resource)
    • Can install a database cluster of a declared software version and number of members
    • Scale applications in or out
    • Continues to monitor an application as it runs (for example, Prometheus Operator simplifies the deployment and configuration of Prometheus, Alertmanager, and related monitoring components)
    • Initiate upgrades, automated backups, and failure recovery, simulating failure in all or part of your cluster to test its resilience
    • Publishes a service so that applications that don’t support Kubernetes APIs can discover it

    How does an Operator work?

Operators work by extending the Kubernetes control plane and API. Operators allow you to define a Custom Controller that watches your application and performs custom tasks based on its state. The application you want to watch is usually defined in Kubernetes as a new object: a Custom Resource (CR) that has its own YAML spec and object type that is well understood by the API server. That way, you can define any specific criteria in the custom spec to watch out for, and reconcile the instance when it doesn’t match the spec. The way an Operator’s controller reconciles against a spec is very similar to native Kubernetes controllers, though it uses mostly custom components.
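
As an illustration (the API group, kind, and fields below are hypothetical, not from any real Operator), a custom resource instance is just another YAML object whose spec follows whatever schema the Operator's CRD defines:

 apiVersion: example.com/v1
 kind: MyApp
 metadata:
   name: myapp-sample
 spec:
   replicas: 3        # watched by the custom controller
   version: "1.2.0"   # the controller reconciles the running app to this version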

    What is the Redis Enterprise Operator?

    Redis has created an Operator that deploys and manages the lifecycle of a Redis Enterprise Cluster. The Redis Enterprise Operator is the fastest, most efficient way to deploy and maintain a Redis Enterprise cluster in Kubernetes. The Operator creates, configures, and manages Redis Enterprise deployments from a single Kubernetes control plane. This means that you can manage Redis Enterprise instances on Kubernetes just by creating native objects, such as a Deployment, ReplicaSet, StatefulSet, etc. Operators allow full control over the Redis Enterprise cluster lifecycle.

The Redis Enterprise Operator acts as a custom controller for the custom resource Redis Enterprise Cluster, or “REC”, which is defined through a Kubernetes CRD (custom resource definition) and deployed with a YAML file. The Redis Enterprise Operator functions as the logic “glue” between the Kubernetes infrastructure and the Redis Enterprise cluster.

    How does the Redis Enterprise Operator work?


    The Redis Enterprise Operator supports two Custom Resource Definitions (CRDs):

    • Redis Enterprise Cluster (REC): An API to create Redis Enterprise clusters. Note that only one cluster is supported per Operator deployment.
    • Redis Enterprise Database (REDB): An API to create Redis databases running on the Redis Enterprise cluster. Note that the Redis Enterprise Operator is namespaced. High-level architecture and overview of the solution can be found HERE.

    This is how it works:

    1. First, the Redis Enterprise cluster custom resource (“CR” for short) is read and validated by the operator against the cluster specification.
    2. Next, the cluster StatefulSet, service rigger, cluster admin secrets, and RS/UI services are created.
    3. A Redis Enterprise Database CR is read and validated by the operator.
    4. The database is created on the cluster and the database access credentials are stored in a Kubernetes secret object.
    5. The service rigger discovers the new database and configures the Kubernetes service for the database.
    6. An application workload uses the database secret and service for access to data.
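
For example (a hedged sketch, assuming the generated secret follows the redb-<database-name> naming convention), an application team could read the database credentials like this:

 # Decode the database password from the generated Kubernetes secret
 kubectl get secret redb-redis-enterprise-database -o jsonpath='{.data.password}' | base64 -d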

    Example of Operator automation

    Consider the YAML file below:

apiVersion: app.redislabs.com/v1
kind: RedisEnterpriseCluster
metadata:
  name: rec
spec:
  # Add fields here
  nodes: 3

If you change the number of nodes to 5, the Operator talks to the StatefulSet and changes the number of replicas from 3 to 5. Once that happens, Kubernetes takes over and bootstraps new Pods one at a time. As each becomes ready, the new nodes join the cluster and become available to the Redis Enterprise master nodes.
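
One way to apply that change without re-editing the file is a generic kubectl patch against the custom resource (a sketch; rec is the resource name from the YAML above):

 kubectl patch rec rec --type merge --patch '{"spec":{"nodes":5}}'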


apiVersion: app.redislabs.com/v1
kind: RedisEnterpriseDatabase
metadata:
  name: redis-enterprise-database
spec:
  redisEnterpriseCluster:
    name: redis-enterprise
  memory: 2G


To create a database, the Operator reads the database custom resource, talks to the cluster’s REST API, and creates the database on the cluster. The service rigger then discovers the new database and creates a Kubernetes service endpoint for it, making it available to applications.

    In the next tutorial, you will learn how to get started with the Redis Enterprise Kubernetes Operator from scratch, including how to perform non-trivial tasks such as backup, restore, horizontal scaling, and much more. Stay tuned!

    References


    Create a Redis database on Google Kubernetes Engine


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    Step 1. Prerequisites

    Step 2. Ensure that gcloud is installed on your local Linux system:

    $ gcloud -v
    Google Cloud SDK 320.0.0
    alpha 2020.12.04
    app-engine-go 1.9.71
    app-engine-java 1.9.84
    app-engine-python 1.9.91
    app-engine-python-extras 1.9.91

Step 3. Create a 5-node GKE cluster:

    $ gcloud container clusters create testredis  --subnetwork default --num-nodes 5 --machine-type e2-standard-8 --enable-basic-auth --region us-east1

    Step 4. Create a new namespace

    [node1 kubelabs]$ kubectl create namespace demo
    namespace/demo created

    Step 5. Switch context to the newly created namespace

    $ kubectl config set-context --current --namespace=demo
    Context "kubernetes-admin@kubernetes" modified.

    Step 6. Deploy the operator bundle

To deploy the default installation with kubectl, run the following command, which deploys a bundle of all the YAML declarations required for the operator. You can download the bundle YAML file via this link:

    $ kubectl apply -f bundle.yaml
    role.rbac.authorization.k8s.io/redis-enterprise-operator created
    rolebinding.rbac.authorization.k8s.io/redis-enterprise-operator created
    serviceaccount/redis-enterprise-operator created
    customresourcedefinition.apiextensions.k8s.io/redisenterpriseclusters.app.redislabs.com created
    deployment.apps/redis-enterprise-operator created
    customresourcedefinition.apiextensions.k8s.io/redisenterprisedatabases.app.redislabs.com created

    Step 7. Verifying the Deployment:

Run the following command to verify that the redis-enterprise-operator deployment is running.

    kubectl get deployment
    NAME READY UP-TO-DATE AVAILABLE AGE
    redis-enterprise-operator 1/1 1 1 9m34s

    Step 8. Create a Redis Enterprise Cluster

    Create a Redis Enterprise Cluster (REC) using the default configuration, which is suitable for development type deployments and works in typical scenarios:

    $ kubectl apply -f crds/app_v1_redisenterprisecluster_cr.yaml

    redisenterprisecluster.app.redislabs.com/redis-enterprise created

    Step 9. Verifying the Redis Enterprise Cluster

    rec is a shortcut for RedisEnterpriseCluster. The cluster takes around 5-10 minutes to come up. Run the command below to check that the RedisEnterpriseCluster is up:

    $ kubectl get rec
    NAME AGE
    redis-enterprise 14s
    [node1 redis-enterprise-k8s-docs]$

    Step 10. Listing Kubernetes Resources

    $ kubectl get po,svc,deploy
    NAME READY STATUS RESTARTS AGE
    pod/redis-enterprise-0 2/2 Running 0 6m42s
    pod/redis-enterprise-1 2/2 Running 0 4m34s
    pod/redis-enterprise-2 2/2 Running 0 2m18s
    pod/redis-enterprise-operator-58f8566fd7-5kcvz 1/1 Running 0 69m
    pod/redis-enterprise-services-rigger-5849b86c65-lwql9 1/1 Running 0 6m42s
    NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
    service/kubernetes ClusterIP 10.3.240.1 <none> 443/TCP 71m
    service/redis-enterprise ClusterIP None <none> 9443/TCP,8001/TCP,8070/TCP 6m42s
    service/redis-enterprise-ui LoadBalancer 10.3.246.252 35.196.117.24 8443:31473/TCP 6m42s
    NAME READY UP-TO-DATE AVAILABLE AGE
    deployment.apps/redis-enterprise-operator 1/1 1 1 69m
    deployment.apps/redis-enterprise-services-rigger 1/1 1 1 6m44s

    You can verify the Pods and list of services using the Google Cloud Dashboard UI:

    Redis Enterprise UI

    Step 11. Listing the Secrets

    kubectl get secrets redis-enterprise -o yaml | grep password | awk '{print $2}'
    bXVLeHRpblY=

Step 12. Decoding the Password

    echo bXVLeHRpblY= | base64 -d

    Step 13. Creating a Database

Open https://localhost:8443 in the browser to see the Redis Enterprise Software web console (use the port-forward command below, or the external IP of the redis-enterprise-ui LoadBalancer service on port 8443). Click "Setup", add your preferred DNS and admin credentials, and proceed to create your first Redis database using Redis Enterprise.
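
Since the cluster runs inside GKE, one way to reach the console on localhost is to port-forward the UI service listed in Step 10:

 kubectl port-forward service/redis-enterprise-ui 8443:8443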

    Next Steps

    Redis Launchpad

    Azure Cache for Redis Enterprise using Terraform with Private Link

Azure Private Link for Azure Cache for Redis provides private connectivity from a virtual network to your cache instance. This means that you can now use Azure Private Link to connect to an Azure Cache for Redis instance from your virtual network via a private endpoint, which is assigned a private IP address in a subnet within the virtual network. It simplifies the network architecture and secures the connection between endpoints in Azure by eliminating data exposure to the public internet. Private Link carries traffic privately, reducing your exposure to threats and helping you meet compliance standards.

Azure Resource Manager (a.k.a. AzureRM) is the deployment and management service for Azure. It provides a management layer that enables you to create, update, and delete resources in your Azure account. You can use management features, like access control, locks, and tags, to secure and organize your resources after deployment. The azurerm_redis_enterprise_cluster resource manages a Redis Enterprise cluster. This is a template to get started with the azurerm_redis_enterprise_cluster resource available in the azurerm provider with Terraform.
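
As a minimal sketch (the name and SKU are placeholders, and azurerm_resource_group.example is assumed to be defined elsewhere; the repository used in this tutorial also wires up the virtual network and private endpoint), the resource looks like this:

 resource "azurerm_redis_enterprise_cluster" "example" {
   name                = "example-redisenterprise"  # placeholder name
   resource_group_name = azurerm_resource_group.example.name
   location            = azurerm_resource_group.example.location
   sku_name            = "Enterprise_E10-2"         # E10 SKU with 2 capacity units
 }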

    Prerequisites

    1. Terraform
    2. Azure CLI

    Step 1. Getting Started

Log in to Azure using the Azure CLI:

    az login

Logging in with an Azure Service Principal also works:

    az login --service-principal --username APP_ID --tenant TENANT_ID --password [password || /path/to/cert]

    Step 2: Clone the repository

    git clone https://github.com/redis-developer/acre-terraform

    Step 3: Initialize the repository

    cd acre-terraform
    terraform init

    The output should include: Terraform has been successfully initialized

    Step 4: Modify the variables (optional)

The default variables are set up to deploy the smallest 'E10' instance into the 'East US' region. Changes can be made by updating the variables.tf file.

    Step 5: Verify the plan

    The 'plan' output will show you everything being created by the template.

    terraform plan

    The output should include: Plan: 18 to add, 0 to change, 0 to destroy.

    Step 6: Apply the plan

    When the plan looks good, 'apply' the template.

    terraform apply

    The output should include: Apply complete! Resources: 18 added, 0 changed, 0 destroyed.

    Step 7: Connect using generated output

    The access key is sensitive, so viewing the outputs must be requested specifically. The output is also in JSON format.

    terraform output redisgeek_config

    Example output:

    {
    "hostname" = "redisgeek-8jy4.eastus.redisenterprise.cache.azure.net"
    "access_key" = "DQYABC3uRMXXXXXXXXXXXXXXXXTRkfgOXXXPjs82Y="
    "port" = "10000"
    }

    Resources

    1. How to use Redis Cache for Redis like a Pro
    2. Do More with Azure Cache for Redis, Enterprise Tiers

    References


    Azure Cache for Redis Enterprise using Terraform

The Enterprise Tiers of Azure Cache for Redis are generally available as a native fully managed service on Microsoft Azure. This offering combines Azure’s global presence, flexibility, security, and compliance with Redis Enterprise’s unmatched availability, performance, and extended data structure functionality to create the best experience for enterprises.

    Azure Resource Manager (a.k.a AzureRM) is the deployment and management service for Azure. It provides a management layer that enables you to create, update, and delete resources in your Azure account. You use management features, like access control, locks, and tags, to secure and organize your resources after deployment.

    The "azurerm_redis_enterprise_cluster" is a resource that manages a Redis Enterprise cluster. This is a template to get started with the 'azurerm_redis_enterprise_cluster' resource available in the 'azurerm' provider with Terraform.

    Prerequisites

    1. Terraform CLI
    2. Azure CLI

    Step 1. Getting Started

Log in to Azure using the Azure CLI:

    az login

    Step 2: Clone the repository

    git clone https://github.com/redis-developer/acre-terraform-simple

    Step 3: Initialize the repository

    cd acre-terraform-simple
    terraform init

    The output should include: Terraform has been successfully initialized

Step 4: Modify the variables (optional)

    The default variables are setup to deploy the smallest 'E10' instance into the 'East US' region. Changes can be made by updating the variables.tf file.

    Step 5: Verify the plan

    The 'plan' output will show you everything being created by the template.

    terraform plan

The plan step does not make any changes in Azure.

    Step 6: Apply the plan

    When the plan looks good, 'apply' the template.

    terraform apply

    Step 7: Connect using generated output

    The access key is sensitive, so viewing the outputs must be requested specifically. The output is also in JSON format.

    terraform output redisgeek_config

    Example output:

    {
    "hostname" = "redisgeek-8jy4.eastus.redisenterprise.cache.azure.net"
    "access_key" = "DQYABC3uRMyDguEXXXXXXXXXXWTRkfgOPjs82Y="
    "port" = "10000"
    }

    Resources

    How to use Redis Cache for Redis like a Pro
    Do More with Azure Cache for Redis, Enterprise Tiers

    References


    How to Deploy and Manage Redis Databases on AWS Using Terraform


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis
    Profile picture for Rahul Chauhan
    Author:
    Rahul Chauhan, Corporate Solution Architect at Redis


    Development teams today are embracing more and more DevOps principles, such as continuous integration and continuous delivery (CI/CD). Therefore, the need to manage infrastructure-as-code (IaC) has become an essential capability for any cloud service. IaC tools allow you to manage infrastructure with configuration files rather than through a graphical user interface. IaC allows you to build, change, and manage your infrastructure in a safe, consistent, and repeatable way by defining resource configurations that you can version, reuse, and share.

A leading tool in the IaC space is HashiCorp Terraform, which supports the major cloud providers and services through its ecosystem of providers and modules for provisioning, compliance, and management of any cloud, infrastructure, and service.

    What is Terraform?

    Terraform is an open source IaC software tool that provides a consistent CLI workflow to manage hundreds of cloud services. Terraform codifies cloud APIs into declarative configuration files, which can then be shared amongst team members, treated as code, edited, reviewed, and versioned. It enables you to safely and predictably create, change, and improve infrastructure.

    Capabilities of Terraform

    • Terraform is not just a configuration management tool. It also focuses on the higher-level abstraction of the data center and associated services, while allowing you to use configuration management tools on individual systems.
    • It supports multiple cloud providers, such as AWS, GCP, Azure, DigitalOcean, etc.
    • It provides a single unified syntax, instead of requiring operators to use independent and non-interoperable tools for each platform and service.
    • Manages both existing service providers and custom in-house solutions.
    • Terraform is easily portable to any other provider.
    • Provides immutable infrastructure, applying configuration changes by replacing resources rather than mutating them in place.
    • Supports client-only architecture, so no need for additional configuration management on a server.
    • Terraform is very flexible, using a plugin-based model to support providers and provisioners, giving it the ability to support almost any service that exposes APIs.
    • It is not intended to give low-level programmatic access to providers, but instead provides a high-level syntax for describing how cloud resources and services should be created, provisioned, and combined.
    • It provides a simple, unified syntax, allowing almost any resource to be managed without learning new tooling.

    The HashiCorp Terraform Redis Enterprise Cloud provider

    Redis has developed a Terraform provider for Redis Enterprise Cloud. The HashiCorp Terraform Redis Enterprise Cloud provider allows customers to deploy and manage Redis Enterprise Cloud subscriptions, databases, and network peering easily as code, on any cloud provider. It is a plugin for Terraform that allows Redis Enterprise Cloud Flexible customers to manage the full life cycle of their subscriptions and related Redis databases.

The Redis Enterprise Cloud provider is used to interact with the resources supported by Redis Enterprise Cloud. The provider needs to be configured with the proper credentials before it can be used.


    Before we jump into the implementation, let us take a moment to better understand the Terraform configuration. A Terraform configuration is a complete document in the Terraform language that tells Terraform how to manage a given collection of infrastructure. A configuration can consist of multiple files and directories. Terraform is broken down into three main components:

    • Providers
    • Data sources
    • Resources

    Providers

A provider is the first resource that needs to be defined in any project’s Terraform configuration file. The provider gives you access to the API you will be interacting with to create resources. Once the provider has been configured and authenticated, a vast number of resources become available for creation. Terraform serves more than 100 cloud providers.

    A provider defines resources and data for a particular infrastructure, such as AWS. As shown below, the terraform block {} contains terraform settings, including the required providers Terraform will use to provision your infrastructure (for example, rediscloud provider).

 terraform {
   required_providers {
     rediscloud = {
       source  = "RedisLabs/rediscloud"
       version = "0.2.2"
     }
   }
 }

The provider {} block configures the specific provider. The nested cloud_provider {} block below shows how a subscription resource targets a cloud; in the following example, it is AWS.

 cloud_provider {
   provider         = "AWS"
   cloud_account_id = 1
   region {
     region                       = "us-east-1"
     networking_deployment_cidr   = "10.0.0.0/24"
     preferred_availability_zones = ["us-east-1a"]
   }
 }

    Resources

    Resources are the most important element in the Terraform language. This is where you describe the piece of infrastructure to be created, and this can range from a compute instance to defining specific permissions and much more.

    As shown below, the resource {} block is used to define components of your infrastructure. A resource might be a physical or virtual component, such as EC2, or it could be a logical component, such as a Heroku application.

     resource "random_password" "passwords" {
    count = 2
    length = 20
    upper = true
    lower = true
    number = true
    }

The resource {} block has two strings before the block: the resource type and the resource name. The prefix of the type maps to the name of the provider. For example, the resource type “random_password” and the resource name “passwords” together form a unique identifier for the resource. Terraform uses this ID to identify the resource.

    Data sources

    Data sources allow Terraform to use information defined outside of Terraform, defined by another separate Terraform configuration, or modified by functions. Each provider may offer data sources alongside its set of resource types. A data source is accessed via a special kind of resource known as a data resource, declared using a data block.

     data "rediscloud_payment_method" "card" {
    card_type = "Visa"
    last_four_numbers = "XXXX"
    }

    A data block requests that Terraform read from a given data source ("rediscloud_payment_method") and export the result under the given local name ("card"). The name is used to refer to this resource from elsewhere in the same Terraform module, but has no significance outside of the scope of a module.

Within the block body (between { and }) are query constraints defined by the data source. Most arguments in this section depend on the data source; in this example, card_type and last_four_numbers are both arguments defined specifically for the rediscloud_payment_method data source.

    Configure Redis Enterprise Cloud programmatic access

    In order to set up authentication with the Redis Enterprise Cloud provider, a programmatic API key must be generated for Redis Enterprise Cloud. The Redis Enterprise Cloud documentation contains the most up-to-date instructions for creating and managing your key(s) and IP access.

    tip

    Flexible and Annual Redis Enterprise Cloud subscriptions can leverage a RESTful API that permits operations against a variety of resources, including servers, services, and related infrastructure. The REST API is not supported for Fixed or Free subscriptions.

     provider "rediscloud" { } # Example resource configuration
    resource "rediscloud_subscription" "example" { # ... }

    Prerequisites:

    • Install Terraform on MacOS.
    • Create a free Redis Enterprise Cloud account.
    • Create your first subscription.
    • Enable API

    Step 1: Install Terraform on MacOS

    Use Homebrew to install Terraform on MacOS as shown below:

     brew install terraform

    Step 2: Sign up for a free Redis Enterprise Cloud account

    Follow this tutorial to sign up for a free Redis Enterprise Cloud account.


    Step 3: Enable Redis Enterprise Cloud API

    If you have a Flexible (or Annual) Redis Enterprise Cloud subscription, you can use a REST API to manage your subscription programmatically. The Redis Cloud REST API is available only to Flexible or Annual subscriptions. It is not supported for Fixed or Free subscriptions.

    For security reasons, the Redis Cloud API is disabled by default. To enable the API:

    • Sign in to your Redis Cloud subscription as an account owner.

    • From the menu, choose Access Management.

    • When the Access Management screen appears, select the API Keys tab.


    If a Copy button appears to the right of the API account key, the API is enabled. This button copies the account key to the clipboard.

    If you see an Enable API button, select it to enable the API and generate your API account key.

    To authenticate REST API calls, you need to combine the API account key with an API user key to make API calls.
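
The provider can read these keys from environment variables, which keeps them out of your configuration files (variable names per the rediscloud provider; the values below are placeholders):

 export REDISCLOUD_ACCESS_KEY="your-api-account-key"
 export REDISCLOUD_SECRET_KEY="your-api-user-key"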


    Step 4: Create a main.tf file

    It’s time to create an empty “main.tf” file and start adding the provider, resource and data sources as shown below:

 terraform {
   required_providers {
     rediscloud = {
       source  = "RedisLabs/rediscloud"
       version = "0.2.2"
     }
   }
 }

 # Provide your credit card details
 data "rediscloud_payment_method" "card" {
   card_type         = "Visa"
   last_four_numbers = "XXXX"
 }

 # Generates a random password for the database
 resource "random_password" "passwords" {
   count   = 2
   length  = 20
   upper   = true
   lower   = true
   number  = true
   special = false
 }

 resource "rediscloud_subscription" "rahul-test-terraform" {
   name              = "rahul-test-terraform"
   payment_method_id = data.rediscloud_payment_method.card.id
   memory_storage    = "ram"

   cloud_provider {
     provider         = "AWS"
     cloud_account_id = 1
     region {
       region                       = "us-east-1"
       networking_deployment_cidr   = "10.0.0.0/24"
       preferred_availability_zones = ["us-east-1a"]
     }
   }

   database {
     name               = "db-json"
     protocol           = "redis"
     memory_limit_in_gb = 1
     replication        = true
     data_persistence   = "aof-every-1-second"
     module {
       name = "RedisJSON"
     }
     throughput_measurement_by    = "operations-per-second"
     throughput_measurement_value = 10000
     password                     = random_password.passwords[1].result
   }
 }

    Step 5: Create an execution plan

    The Terraform plan command creates an execution plan, which lets you preview the changes that Terraform plans to make to your infrastructure. By default, when Terraform creates a plan, it reads the current state of any already existing remote objects to make sure that Terraform state is up to date. It then compares the current configuration to the prior state and then proposes a set of change actions that should make the remote object match the configuration.

     % terraform plan


    Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols:
    + create

    Terraform will perform the following actions:

    # random_password.passwords[0] will be created
    + resource "random_password" "passwords" {
    + id = (known after apply)
    + length = 20
    + lower = true
    + min_lower = 0
    + min_numeric = 0
    + min_special = 0
    + min_upper = 0
    + number = true
    + result = (sensitive value)
    + special = false
    + upper = true
    }

    # random_password.passwords[1] will be created
    + resource "random_password" "passwords" {
    + id = (known after apply)
    + length = 20
    + lower = true
    + min_lower = 0
    + min_numeric = 0
    + min_special = 0
    + min_upper = 0
    + number = true
    + result = (sensitive value)
    + special = false
    + upper = true
    }

    # rediscloud_subscription.rahul-test-terraform will be created
    + resource "rediscloud_subscription" "rahul-test-terraform" {
    + id = (known after apply)
    + memory_storage = "ram"
    + name = "rahul-test-terraform"
    + payment_method_id = "XXXX"
    + persistent_storage_encryption = true

    + cloud_provider {
    + cloud_account_id = "1"
    + provider = "AWS"

    + region {
    + multiple_availability_zones = false
    + networking_deployment_cidr = "10.0.0.0/24"
    + networks = (known after apply)
    + preferred_availability_zones = [
    + "us-east-1a",
    ]
    + region = "us-east-1"
    }
    }

    + database {
    # At least one attribute in this block is (or was) sensitive,
    # so its contents will not be displayed.
    }
    }

    Plan: 3 to add, 0 to change, 0 to destroy.

    ───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────

note

You didn't use the -out option to save this plan, so Terraform can't guarantee to take exactly these actions if you run "terraform apply" now.

    Step 6: Execute the action

    The Terraform apply command executes the actions proposed in a Terraform plan.

     terraform apply


    Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols:
    + create

    Terraform will perform the following actions:

  # random_password.passwords[0] will be created
  + resource "random_password" "passwords" {
      + id          = (known after apply)
      + length      = 20
      + lower       = true
      + min_lower   = 0
      + min_numeric = 0
      + min_special = 0
      + min_upper   = 0
      + number      = true
      + result      = (sensitive value)
      + special     = false
      + upper       = true
    }

  # random_password.passwords[1] will be created
  + resource "random_password" "passwords" {
      + id          = (known after apply)
      + length      = 20
      + lower       = true
      + min_lower   = 0
      + min_numeric = 0
      + min_special = 0
      + min_upper   = 0
      + number      = true
      + result      = (sensitive value)
      + special     = false
      + upper       = true
    }

  # rediscloud_subscription.rahul-test-terraform will be created
  + resource "rediscloud_subscription" "rahul-test-terraform" {
      + id                            = (known after apply)
      + memory_storage                = "ram"
      + name                          = "rahul-test-terraform"
      + payment_method_id             = "XXXX"
      + persistent_storage_encryption = true

      + cloud_provider {
          + cloud_account_id = "1"
          + provider         = "AWS"

          + region {
              + multiple_availability_zones  = false
              + networking_deployment_cidr   = "10.0.0.0/24"
              + networks                     = (known after apply)
              + preferred_availability_zones = [
                  + "us-east-1a",
                ]
              + region                       = "us-east-1"
            }
        }

      + database {
          # At least one attribute in this block is (or was) sensitive,
          # so its contents will not be displayed.
        }
    }

    Plan: 3 to add, 0 to change, 0 to destroy.

    Do you want to perform these actions?
    Terraform will perform the actions described above.
    Only 'yes' will be accepted to approve.

    Enter a value: yes

    random_password.passwords[0]: Creating...
    random_password.passwords[1]: Creating...
    random_password.passwords[1]: Creation complete after 0s [id=none]
    random_password.passwords[0]: Creation complete after 0s [id=none]
    rediscloud_subscription.rahul-test-terraform: Creating...
    rediscloud_subscription.rahul-test-terraform: Still creating... [10s elapsed]
    rediscloud_subscription.rahul-test-terraform: Still creating... [20s elapsed]
    rediscloud_subscription.rahul-test-terraform: Creation complete after 8m32s [id=1649277]

    Apply complete! Resources: 3 added, 0 changed, 0 destroyed.

    Step 7: Verify the database

You can now verify the new database, named “db-json,” created under your subscription.

Deploy a Redis database with JSON and other Redis Stack features on AWS using Terraform:

terraform {
  required_providers {
    rediscloud = {
      source  = "RedisLabs/rediscloud"
      version = "0.2.2"
    }
  }
}

# Provide your credit card details
data "rediscloud_payment_method" "card" {
  card_type         = "Visa"
  last_four_numbers = "XXXX"
}

# Generates a random password for the database
resource "random_password" "passwords" {
  count   = 2
  length  = 20
  upper   = true
  lower   = true
  number  = true
  special = false
}

resource "rediscloud_subscription" "rahul-test-terraform" {
  name              = "rahul-test-terraform"
  payment_method_id = data.rediscloud_payment_method.card.id
  memory_storage    = "ram"

  cloud_provider {
    provider         = "AWS"
    cloud_account_id = 1

    region {
      region                       = "us-east-1"
      networking_deployment_cidr   = "10.0.0.0/24"
      preferred_availability_zones = ["us-east-1a"]
    }
  }

  database {
    name               = "db-json"
    protocol           = "redis"
    memory_limit_in_gb = 1
    replication        = true
    data_persistence   = "aof-every-1-second"

    module {
      name = "RedisJSON"
    }

    throughput_measurement_by    = "operations-per-second"
    throughput_measurement_value = 10000
    password                     = random_password.passwords[1].result
  }
}
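
One optional addition, not part of the original walkthrough, is a Terraform output so you can retrieve the generated database password later without inspecting the state file by hand (the output name db_password is an arbitrary choice):

output "db_password" {
  description = "Password generated for the db-json database"
  value       = random_password.passwords[1].result
  sensitive   = true
}

After the apply completes you can then read it back with terraform output db_password (or terraform output -json, which includes sensitive values).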

    Step 8: Cleanup

The terraform destroy command is a convenient way to destroy all remote objects managed by a particular Terraform configuration. While you will typically not want to destroy long-lived objects in a production environment, Terraform is sometimes used to manage ephemeral infrastructure for development purposes, in which case you can use terraform destroy to conveniently clean up all of those temporary objects once you are finished with your work.

    % terraform destroy
    random_password.passwords[0]: Refreshing state... [id=none]
    random_password.passwords[1]: Refreshing state... [id=none]
    rediscloud_subscription.rahul-test-terraform: Refreshing state... [id=1649277]

    Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols:
    - destroy

    Terraform will perform the following actions:

  # random_password.passwords[0] will be destroyed
  - resource "random_password" "passwords" {
      - id          = "none" -> null
      - length      = 20 -> null
      - lower       = true -> null
      - min_lower   = 0 -> null
      - min_numeric = 0 -> null
      - min_special = 0 -> null
      - min_upper   = 0 -> null
      - number      = true -> null
      - result      = (sensitive value)
      - special     = false -> null
      - upper       = true -> null
    }

  # random_password.passwords[1] will be destroyed
  - resource "random_password" "passwords" {
      - id          = "none" -> null
      - length      = 20 -> null
      - lower       = true -> null
      - min_lower   = 0 -> null
      - min_numeric = 0 -> null
      - min_special = 0 -> null
      - min_upper   = 0 -> null
      - number      = true -> null
      - result      = (sensitive value)
      - special     = false -> null
      - upper       = true -> null
    }

  # rediscloud_subscription.rahul-test-terraform will be destroyed
  - resource "rediscloud_subscription" "rahul-test-terraform" {
      - id                            = "1649277" -> null
      - memory_storage                = "ram" -> null
      - name                          = "rahul-test-terraform" -> null
      - payment_method_id             = "XXXX" -> null
      - persistent_storage_encryption = true -> null

      - cloud_provider {
          - cloud_account_id = "1" -> null
          - provider         = "AWS" -> null

          - region {
              - multiple_availability_zones  = false -> null
              - networking_deployment_cidr   = "10.0.0.0/24" -> null
              - networks                     = [
                  - {
                      - networking_deployment_cidr = "10.0.0.0/24"
                      - networking_subnet_id       = "subnet-0055e8e3ee3ea796e"
                      - networking_vpc_id          = ""
                    },
                ] -> null
              - preferred_availability_zones = [
                  - "us-east-1a",
                ] -> null
              - region                       = "us-east-1" -> null
            }
        }

      - database {
          # At least one attribute in this block is (or was) sensitive,
          # so its contents will not be displayed.
        }
    }

    Plan: 0 to add, 0 to change, 3 to destroy.

    Do you really want to destroy all resources?
    Terraform will destroy all your managed infrastructure, as shown above.
    There is no undo. Only 'yes' will be accepted to confirm.

    Enter a value: yes

    rediscloud_subscription.rahul-test-terraform: Destroying... [id=1649277]

    rediscloud_subscription.rahul-test-terraform: Destruction complete after 1m34s
    random_password.passwords[0]: Destroying... [id=none]
    random_password.passwords[1]: Destroying... [id=none]
    random_password.passwords[0]: Destruction complete after 0s
    random_password.passwords[1]: Destruction complete after 0s

    Destroy complete! Resources: 3 destroyed.


    Conclusion of Running Redis at Scale


Authors:
• Justin Castilla, Senior Developer Advocate at Redis
• Elena Kolevska, Technical Enablement Manager, EMEA at Redis
• Kurt Moeller, Technical Enablement Manager, US at Redis

    You've made it! Thanks again for trying out this course on the Redis Developer Site. We hope you've enjoyed it, and we hope it's provided you with the tools you need to successfully scale Redis with your applications.



    If you would like to receive a certificate of completion for this course, head to Redis University to enroll in the full-format class which includes homework for each course section. If you pass the course with a grade of sixty-five percent or greater, you'll be able to generate your certificate and post it to your LinkedIn profile.


Please consider subscribing to our YouTube channel to stay up to date with all of our latest tutorials, interviews, and general news.

And if you have any feedback or insights you want to share with the Redis University team, don't hesitate to leave a note in the online chat on our Discord server.

    Again, we're grateful you've taken the time to work through our course. Happy learning and see you next time!

    Best wishes,

    Elena, Kurt, and Justin


    3.1 Basic Replication



    Replication in Redis follows a simple primary-replica model where the replication happens in one direction - from the primary to one or multiple replicas. Data is only written to the primary instance and replicas are kept in sync so that they’re exact copies of the primaries.

    To create a replica, you instantiate a Redis server instance with the configuration directive replicaof set to the address and port of the primary instance. Once the replica instance is up and running, the replica will try to sync with the primary. To transfer all of its data as efficiently as possible, the primary instance will produce a compacted version of the data in a snapshot (.rdb) file and send it to the replica.
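
As a minimal sketch (assuming Redis 5 or newer, and a primary already listening on 127.0.0.1:6379), the same directive can be passed on the command line to bring up a replica on another port:

$ redis-server --port 6380 --replicaof 127.0.0.1 6379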

    The replica will then read the snapshot file and load all of its data into memory, which will bring it to the same state the primary instance had at the moment of creating the .rdb file. When the loading stage is done, the primary instance will send the backlog of any write commands run since the snapshot was made. Finally, the primary instance will send the replica a live stream of all subsequent commands.

    By default, replication is asynchronous. This means that if you send a write command to Redis you will receive your acknowledged response first, and only then will the command be replicated to the replica.

    If the primary goes down after acknowledging a write but before the write can be replicated, then you might have data loss. To avoid this, the client can use the WAIT command. This command blocks the current client until all of the previous write commands are successfully transferred and acknowledged by at least some specified number of replicas.

For example, if we send the command WAIT 2 0, the client will block (that is, it will not receive a response) until all of the previous write commands issued on that connection have been written to at least 2 replicas. The second argument - 0 - instructs the server to block indefinitely, but we could set it to a number of milliseconds so that the command times out after a while and returns the number of replicas that successfully acknowledged the commands.
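
For illustration, here is a hypothetical redis-cli session (assuming exactly one reachable replica); the integer reply is the number of replicas that acknowledged the preceding writes:

127.0.0.1:6379> SET stock:count 42
OK
127.0.0.1:6379> WAIT 1 100
(integer) 1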

    Replicas are read-only. This means that you can configure your clients to read from them, but you cannot write data to them. If you need additional read throughput, you can configure your Redis client to read from replicas as well as from your primary node. However, it's often easier just to scale out your cluster. This lets you scale reads and writes without writing any complex client logic.

    Also, you should know about Active-Active, an advanced feature of Redis Enterprise and Redis Cloud. Active-Active replicates entire databases across geographically-distributed clusters. With Active-Active, you can write locally to any replica databases, and those writes will be reflected globally. Something to keep in mind when you're really scaling out!


    3.2 Exercise - Enabling Basic Replication

    Step 1

    First let’s create and configure the primary instance. We’ll start with a few configuration changes in its primary.conf configuration file.

    $ touch primary.conf  # Create the configuration file

    Now open the primary.conf file with your favorite text editor and set the following configuration directives:

    # Create a strong password here
    requirepass a_strong_password

    # AUTH password of the primary instance in case this instance becomes a replica
    masterauth a_strong_password

    # Enable AOF file persistence
    appendonly yes

    # Choose a name for the AOF file
    appendfilename "primary.aof"

    Finally, let’s start the primary instance:

    $ redis-server ./primary.conf

    Step 2

    Next, let’s prepare the configuration file for the replica:

    $ touch replica.conf

    Let’s add some settings to the file we just created:

    # Port on which the replica should run
    port 6380

    # Address of the primary instance
    replicaof 127.0.0.1 6379

    # AUTH password of the primary instance
    masterauth a_strong_password

    # AUTH password for the replica instance
    requirepass a_strong_password

    And let’s start the replica:

    $ redis-server ./replica.conf

    Step 3

    Open two terminal tabs and use them to start connections to the primary and replica instances:

    # Tab 1 (primary)
    $ redis-cli
    # Tab 2 (replica)
    $ redis-cli -p 6380

    Authenticate on both tabs by running the command AUTH followed by your password:

    AUTH a_strong_password

On the second (replica) tab, run the MONITOR command, which will allow you to see every command executed against that instance.

Go back to the first (primary) tab and execute any write command, for example:

    127.0.0.1:6379> SET foo bar

    In the second tab you should see that the command was already sent to the replica:

    1617230062.389077 [0 127.0.0.1:6379] "SELECT" "0"
    1617230062.389092 [0 127.0.0.1:6379] "set" "foo" "bar"

    Step 4

    Keep the instances running, or at least their configuration files around. We’ll need them for the next exercise.


    3.4 Exercise - Sentinel Hands-on

    Step 1

    If you still have the primary and replica instances we set up in the previous exercise (3.2) - great! We’ll reuse them to create our Sentinel setup. If not - refer back to the instructions and go through them again.

    When done, you will have a primary Redis instance with one replica.

    Step 2

    To initialise a Redis Sentinel, you need to provide a configuration file, so let’s go ahead and create one:

    $ touch sentinel1.conf

    Open the file and paste in the following settings:

    port 5000
    sentinel monitor myprimary 127.0.0.1 6379 2
    sentinel down-after-milliseconds myprimary 5000
    sentinel failover-timeout myprimary 60000
    sentinel auth-pass myprimary a_strong_password

Breakdown of terms:
• port - the port on which the Sentinel should run
• sentinel monitor - monitor the primary on a specific IP address and port. Given the address of the primary, the Sentinels will be able to discover all of the replicas on their own. The last argument on this line is the number of Sentinels needed for quorum - in our example, 2.
• sentinel down-after-milliseconds - the number of milliseconds an instance should be unreachable before it is considered down
• sentinel failover-timeout - if a Sentinel voted for another Sentinel to lead the failover of a given primary, it will wait this many milliseconds before trying to fail over the same primary again
• sentinel auth-pass - in order for the Sentinels to connect to Redis server instances configured with requirepass, the Sentinel configuration must include the sentinel auth-pass directive

    Step 3

Make two more copies of this file - sentinel2.conf and sentinel3.conf - and edit them so that the port directive is set to 5001 and 5002, respectively.

    Step 4

    Let’s initialise the three Sentinels in three different terminal tabs:

    # Tab 1
    $ redis-server ./sentinel1.conf --sentinel
    # Tab 2
    $ redis-server ./sentinel2.conf --sentinel
# Tab 3
    $ redis-server ./sentinel3.conf --sentinel

    Step 5

If you connect to one of the Sentinels now, you'll be able to run a number of Sentinel-specific commands that would return an error if run against a regular Redis instance. For example:

    # Provides information about the Primary
    SENTINEL master myprimary

    # Gives you information about the replicas connected to the Primary
    SENTINEL replicas myprimary

    # Provides information on the other Sentinels
    SENTINEL sentinels myprimary

    # Provides the IP address of the current Primary
    SENTINEL get-master-addr-by-name myprimary

    Step 6

If we kill the primary Redis instance now - by pressing Ctrl+C or by running the redis-cli -p 6379 DEBUG SLEEP 30 command - we'll be able to observe in the Sentinels' logs that the failover process starts in about 5 seconds. If you then run the command that returns the IP address of the primary again, you will see that the replica has been promoted to a primary:

    redis> SENTINEL get-master-addr-by-name myprimary
    1) "127.0.0.1"
    2) "6380"

    Ensuring High Availability in Redis

    Hello World!


    3.0 Introduction to High Availability



High availability is a computing concept describing systems that guarantee a high level of uptime. Such systems are designed to be fault-tolerant and highly dependable, and to operate continuously, without intervention and without a single point of failure.

    What does this mean for Redis specifically? Well, it means that if your primary Redis server fails, a backup will kick in, and you, as a user, will see little to no disruption in the service. There are two components needed for this to be possible: replication and automatic failover.

    Replication is the continuous copying of data from a primary database to a backup, or a replica database. The two databases are usually located on different physical servers, so that we can have a functional copy of our data in case we lose the server where our primary database sits.

But having a backup of our data is not enough for high availability. We also need a mechanism that will automatically kick in and redirect all requests toward the replica in the event that the primary fails. This mechanism is called automatic failover.

    In the rest of this section we’ll see how Redis handles replication and which automatic failover solutions it offers. Let’s dig in.


    3.3 Understanding Sentinels

At the beginning of this unit, we learned that we can’t have high availability without replication and automatic failover. We covered replication in the previous two chapters, and now we’ll explain Sentinel - the tool that provides automatic failover.

    Redis Sentinel is a distributed system consisting of multiple Redis instances started in sentinel mode. We call these instances Sentinels.

    The group of Sentinels monitors a primary Redis instance and its replicas. If the sentinels detect that the primary instance has failed, the sentinel processes will look for the replica that has the latest data and will promote that replica to be the new primary. This way, the clients talking to the database will be able to reconnect to the new primary and continue functioning as usual, with minimal disruption to the users.


    Sentinel Quorum Diagram

    Deciding that a primary instance is down

    In order for the Sentinels to be able to decide that a primary instance is down we need to have enough Sentinels agree that the server is unreachable from their point of view.

    Having a number of Sentinels agreeing that they need to take an action is called reaching a quorum. If the Sentinels can’t reach quorum, they cannot decide that the primary has failed. The exact number of Sentinels needed for quorum is configurable.

    Triggering a failover

    Once the Sentinels have decided that a primary instance is down, they need to elect and authorize a leader (a Sentinel instance) that will do the failover. A leader can only be chosen if the majority of the Sentinels agree on it.

    In the final step, the leader will reconfigure the chosen replica to become a primary by sending the command REPLICAOF NO ONE and it will reconfigure the other replicas to follow the newly promoted primary.

    Sentinel and Client Libraries

    If you have a system that uses Sentinel for high availability, then you need to have a client that supports Sentinel. Not all libraries have this feature, but most of the popular ones do, so make sure you add it to your list of requirements when choosing your library.

    Further Reading

    For more information on Redis Sentinel, check out the documentation on redis.io.


    Introduction to Running Redis at Scale


Authors:
• Justin Castilla, Senior Developer Advocate at Redis
• Elena Kolevska, Technical Enablement Manager, EMEA at Redis
• Kurt Moeller, Technical Enablement Manager, US at Redis

    Welcome



    The world's data is growing exponentially. That exponential growth means that database systems must scale. This is a course about running Redis, one of the most popular databases, at scale.

    So, how do you run Redis at scale? There are two general answers to this question, and it's important that we address them right away. That's because the easiest and most common way to run Redis at scale is to let someone else manage your Redis deployment for you.

    The convenience of "database-as-a-service" offerings means that you don't have to know much about how your database scales, and that saves a lot of time and potential false starts.

We at Redis offer Redis Cloud, a highly available cloud-based Redis service that provides a lot of features you can't find anywhere else, like Active-Active geo-distribution.

    Redis Cloud is also really easy to use and has a free tier so you can get going quickly. So, that's the first answer. To run Redis these days, you might just use a fully-managed offering like Redis Cloud. But not everyone can or wants to use a cloud-hosted database.

There are a bunch of reasons for this. For example, maybe you're a large enterprise with your own data centers and dedicated ops teams. Or perhaps you're running a mission-critical application whose SLAs are so rigid that you need to be able to dig deeply into any potential performance issue. This often rules out cloud-based deployments, since the cloud hides away the hardware and networks you're operating in. In this case, you're deploying Redis on your own. And for that, you need to know how Redis scales.

Learning this isn't just useful; it's also genuinely interesting. Sharding, replication, high availability, and disaster recovery are all important concepts that anyone can understand with the right explanation. These concepts aren't rocket science. They're no harder to understand than basic high school math, and knowing about them makes you a better developer. In this course, we'll look closely at how open source Redis scales. And you'll learn by doing, as we present a lot of the ideas through hands-on labs.

    These ideas will apply whether you're deploying open source Redis on your own or managing a Redis Enterprise cluster - which is, ultimately, what you'll want to reach for if you ever outgrow open source Redis. These are some important topics to consider during your time with this course. But let's first learn how to walk before we run.

    We sincerely hope you enjoy what you learn with us about scaling Redis, and as always, it's my pleasure to help.

    Course Overview

    This course is broken up into units covering topics around scaling Redis for production deployment.

    Scaling means more than just performance. We have tried to identify key topics that will help you have a performant, stable, and secure deployment of Redis. This course is divided into the following units:

• Talking to Redis: connection management and tuning of Redis.
• Persistence/Durability: options for persisting Redis data to disk.
• High Availability: how to make sure Redis and your data are always there.
• Scalability: scaling Redis for both higher throughput and capacity.
• Observability: visibility into your Redis deployment (metrics, etc.).

    Our goal is to give you all the information you need to run Redis at scale, in whichever way is best for your organization. We hope you enjoy the course, and please don't hesitate to reach out on the course Discord channel if you have any questions along the way.

    Prerequisites

• Access to a Linux-based system and familiarity with it
• Redis server and redis-cli installed (examples and exercises assume redis-server is in the $PATH)
• docker and docker-compose installed
• A git client and access to clone repos in GitHub. Some exercises will come from the following repository: https://github.com/redislabs-training/ru301

:::note

This repo contains sample demonstrations of Redis running in various scaled configurations, and is not directly correlated with all of the exercises in this course. See the specific exercise instructions for usage.

:::

    Assumptions

    • Comfortable with Linux Bash shell exercises
    • Legacy terminology in Redis uses 'master' and 'slave' but in the course we will use 'primary' and 'replica'. You will still see the legacy terms in many commands, configurations, and field names.
• We will use $ to indicate a command-line prompt and > to indicate a redis-cli prompt.

    5.1 Data points in Redis

    There are several Redis metrics that can be viewed through redis-cli.

    Redis INFO command

    Running the INFO command provides many of the metrics available in a single view.

    127.0.0.1:6379> INFO
    # Server
    redis_version:6.0.1
    redis_git_sha1:00000000
    redis_git_dirty:0
    redis_build_id:e02d1d807e41d65
    redis_mode:standalone
    os:Linux 4.19.121-linuxkit x86_64

There are several sections that can be pulled individually. For example, if you wanted to get just the CLIENTS section, you can pass that section as an argument to the INFO command.

    127.0.0.1:6379> INFO CLIENTS
    # Clients
    connected_clients:1
    client_recent_max_input_buffer:2
    client_recent_max_output_buffer:0
    blocked_clients:0
    tracking_clients:0
    clients_in_timeout_table:0

    Sections

    Server: the current Redis server info.

    Metrics of note:

    • redis_version
    • process_id
    • config_file
    • uptime_in_seconds
    • uptime_in_days

    Clients: available data on clients connected or failed connections.

    Metrics of note:

    • connected_clients
    • blocked_clients

    Memory: memory usage and stats

    Metrics of note:

    • used_memory
    • mem_fragmentation_ratio

    Persistence: RDB or AOF metrics

    Metrics of note:

    • rdb_last_save_time
    • rdb_changes_since_last_save
    • aof_rewrite_in_progress

    Stats: some general statistics

    Metrics of note:

    • keyspace_hits
    • keyspace_misses
    • expired_keys
    • evicted_keys
    • instantaneous_ops_per_sec

    Replication: replication data including primary/replica identifiers and offsets

    Metrics of note:

    • master_link_down_since
    • connected_slaves
    • master_last_io_seconds_ago

    CPU: compute consumption stats

    Metrics of note:

    • used_cpu_sys
    • used_cpu_user

    Modules: data from any loaded modules

    Metrics of note (per module):

    • ver
    • options

    Cluster: whether cluster is enabled

    Metric of note:

    • cluster_enabled

    Keyspace: keys and expiration data

    Metrics of note (per db):

    • keys
    • expires
    • avg_ttl

The output can be read directly or piped into a file.

$ redis-cli INFO STATS > redis-info-stats

    This could be done at intervals and consumed by a local or third party monitoring service.

Some of the data returned by INFO is static. For example, the Redis version won't change until an upgrade is performed. Other data is dynamic - for example, keyspace_hits and keyspace_misses. These two can be combined into a hit ratio and observed as a long-term metric. The replication section field master_link_down_since could be a metric to attach an alert to.
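
As a hypothetical example of the hit-ratio calculation (the numbers shown are made up):

$ redis-cli INFO stats | grep keyspace
keyspace_hits:127
keyspace_misses:23

A hit ratio of 127 ÷ (127 + 23) ≈ 0.85 could then be tracked over time, with an alert attached if it drops below an agreed threshold.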

    Some examples of possible alerts that could be setup for a given metric:

| Metric                 | Example Alert                                            |
| ---------------------- | -------------------------------------------------------- |
| uptime_in_seconds      | < 300 seconds: to ensure the server is staying up        |
| connected_clients      | < minimum number of expected application connections     |
| master_link_down_since | > 30 seconds: replication should be operational          |
| rdb_last_save_time     | > maximum acceptable interval without taking a snapshot  |

:::note

This is not an exhaustive list, but just to give you an idea of how the metrics in INFO could be used.

:::

    Latency and stats data via redis-cli options

    The redis-cli client has some built-in options that allow you to pull some real-time latency and stats data.

    note

    These are not available as commands from Redis but as options in redis-cli.

    Latency options:

    Continuously sample latency:

    $ redis-cli --latency
    min: 1, max: 17, avg: 4.03 (927 samples)

    The raw or csv output flag can be added:

    $ redis-cli --latency --csv
    1,4,1.94,78

In order to sample for longer than one second you can use --latency-history, which has a default interval of 15 seconds that can be changed using the -i param.

    $ redis-cli --latency-history -i 60
    min: 1, max: 30, avg: 4.84 (328 samples)

    This can also be combined with the csv or raw output format flag.

    $ redis-cli --latency-history -i 60 --csv
    13,13,13.00,1
    5,13,9.00,2
    3,13,7.00,3
    3,13,6.00,4
    3,13,5.60,5
    2,13,5.00,6
    2,13,5.43,7
    2,13,5.62,8
    2,13,5.22,9
    2,13,5.00,10
    1,13,4.64,11

    Both of these could be piped to a file as well.

The --latency-dist option shows latency as a spectrum. The default interval is one second, but it can be changed using the -i param.

Latency Distribution diagram

Stats option:

    Get rolling stats from the server using the stat flag.

    $ redis-cli --stat
    ------- data ------ --------------------- load -------------------- - child -
    keys mem clients blocked requests connections
    4 9.98M 51 0 8168035 (+0) 4132
    4 9.98M 51 0 8181542 (+13507) 4132
    4 9.98M 51 0 8196100 (+14558) 4132
    4 9.98M 51 0 8209794 (+13694) 4132
    4 9.98M 51 0 8223420 (+13626) 4132
    4 9.98M 51 0 8236624 (+13204) 4132
    4 9.98M 51 0 8251376 (+14752) 4132
    4 9.98M 51 0 8263417 (+12041) 4182
    4 9.98M 51 0 8276781 (+13364) 4182
    4 9.90M 51 0 8289693 (+12912) 4182

    Memory stats

    Redis includes a MEMORY command that includes a subcommand to get stats.

    127.0.0.1:6379> memory stats
    1) "peak.allocated"
    2) (integer) 11912984
    3) "total.allocated"
    4) (integer) 8379168
    5) "startup.allocated"
    6) (integer) 5292168
    7) "replication.backlog"
    8) (integer) 0
    9) "clients.slaves"
    10) (integer) 0
    11) "clients.normal"
    12) (integer) 16986
    13) "aof.buffer"
    14) (integer) 0

    These values are available in the INFO MEMORY command as well, but here they are returned in a typical Redis RESP Array reply.

There is also a MEMORY DOCTOR subcommand with an analysis report of the current memory metrics.

    Latency Monitoring

    As we know Redis is fast and as a result is often used in very extreme scenarios where low latency is a must. Redis has a feature called Latency Monitoring which allows you to dig into possible latency issues. Latency monitoring is composed of the following conceptual parts:

    • Latency hooks that sample different latency sensitive code paths.
    • Time series recording of latency spikes split by different events.
    • A reporting engine to fetch raw data from the time series.
    • Analysis engine to provide human readable reports and hints according to the measurements.

    By default this feature is disabled because most of the time it is not needed. In order to enable it you can update the threshold time in milliseconds that you want to monitor in your Redis configuration. Events that take longer than the threshold will be logged as latency spikes. The threshold configuration should be set accordingly if the requirement is to identify all events blocking the server for a time of 10 milliseconds or more.

    latency-monitor-threshold 10

    If the debugging session is intended to be temporary the threshold can be set via redis-cli.

    127.0.0.1:6379> CONFIG SET latency-monitor-threshold 10

    To disable the latency framework the threshold should be set back to 0.

    127.0.0.1:6379> CONFIG SET latency-monitor-threshold 0

The latency data can be viewed using the LATENCY command with its subcommands:

    • LATENCY LATEST - latest samples for all events
    • LATENCY HISTORY - latest time series for a given event
    • LATENCY RESET - resets the time series data
    • LATENCY GRAPH - renders an ASCII-art graph
    • LATENCY DOCTOR - analysis report

In order to make use of these commands you need to make yourself familiar with the different events that the latency monitoring framework is tracking (taken from https://redis.io/topics/latency-monitor).

| Event                   | Description                                                                               |
| ----------------------- | ----------------------------------------------------------------------------------------- |
| command                 | regular commands                                                                           |
| fast-command            | O(1) and O(log N) commands                                                                 |
| fork                    | the fork(2) system call                                                                    |
| rdb-unlink-temp-file    | the unlink(2) system call                                                                  |
| aof-write               | writing to the AOF - a catchall event for fsync(2) system calls                            |
| aof-fsync-always        | the fsync(2) system call when invoked by the appendfsync always policy                     |
| aof-write-pending-fsync | the fsync(2) system call when there are pending writes                                     |
| aof-write-active-child  | the fsync(2) system call when performed by a child process                                 |
| aof-write-alone         | the fsync(2) system call when performed by the main process                                |
| aof-fstat               | the fstat(2) system call                                                                   |
| aof-rename              | the rename(2) system call for renaming the temporary file after completing BGREWRITEAOF    |
| aof-rewrite-diff-write  | writing the differences accumulated while performing BGREWRITEAOF                          |
| active-defrag-cycle     | the active defragmentation cycle                                                           |
| expire-cycle            | the expiration cycle                                                                       |
| eviction-cycle          | the eviction cycle                                                                         |
| eviction-del            | deletes during the eviction cycle                                                          |

    For example, you can use the LATENCY LATEST subcommand and you may see some data like this:

    127.0.0.1:6379> latency latest
    1) 1) "command"
    2) (integer) 1616372606
    3) (integer) 600
    4) (integer) 600
    2) 1) "fast-command"
    2) (integer) 1616372434
    3) (integer) 12
    4) (integer) 12

The results of this command provide the timestamp, latency, and max latency for each event. Using the events table above, we can see we had latency spikes for a regular command with the latest and max latency of 600 ms, while an O(1) or O(log N) command had a latency spike of 12 ms.

    Some of the latency commands require a specific event be passed.

    127.0.0.1:6379> latency graph command
    command - high 600 ms, low 100 ms (all time high 600 ms)
    --------------------------------------------------------------------------------
    _##
    o|||
    o||||
    _#|||||

    3222184
    05308ss
    sssss

While the cost of enabling latency monitoring is near zero and its memory requirements are very small, it will raise your baseline memory usage; so, if you are getting the required performance out of Redis, there is no need to leave it enabled.

    Monitoring Tools

    There are many open source monitoring tools and services to visualize your Redis metrics - some of which also provide alerting capabilities.

    One example of this is the Redis Data Source for Grafana. It is a Grafana plug-in that allows users to connect to the Redis database and build dashboards to easily observe Redis data. It provides an out-of-the-box predefined dashboard but also lets you build customized dashboards tuned to your specific needs.


    5.2 Getting Redis Statistics

    Clone this repo if you have not already: https://github.com/redislabs-training/ru301

    Change into the observability-stats directory.

    Requirements
    • docker
    • docker-compose
    • internet connection
Starting Environment

$ docker-compose up -d

Connect to the Environment

    In a terminal run this command to get a shell prompt inside the running Docker container:

$ docker-compose exec redis_stats bash

Generate load

A simple way to generate some load is to open another terminal and run:

$ docker-compose exec redis_stats redis-benchmark

Info

Since most of the stats data comes from the INFO command, you should first run it to see what's available.

    $ redis-cli INFO

    Try piping this output to a file.

    Memory usage

Since we generally recommend setting the maxmemory size, it is possible to calculate the percentage of memory in use - and alert on it - using the maxmemory configuration value and the used_memory stat.

    First set the maxmemory.

    $ redis-cli config set maxmemory 100000

    Then you can pull the two data points to see how that could be used to calculate memory usage.

    $ redis-cli INFO | grep used_memory:
    $ redis-cli CONFIG GET maxmemory
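
As a rough sketch (assuming a Bash shell, and that maxmemory has been set to a non-zero value as above), the two data points could be combined into a percentage like this:

$ used=$(redis-cli INFO memory | grep '^used_memory:' | cut -d: -f2 | tr -d '\r')
$ max=$(redis-cli CONFIG GET maxmemory | tail -1)
$ echo "$((used * 100 / max))% of maxmemory in use"
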
    Client data

    You can pull the clients section of the INFO command:

    $ redis-cli info clients

    or maybe a particular metric you would want to track:

$ redis-cli info clients | grep connected_clients

Stats section

    Use redis-cli to list the full 'stats' section.

    Hit ratio

    A cache hit/miss ratio could be generated using two data points in the stats section.

$ redis-cli INFO stats | grep keyspace

Evicted keys

Eviction occurs when Redis has reached its maximum memory and the maxmemory-policy in redis.conf is set to an eviction policy (that is, anything other than noeviction).

$ redis-cli INFO stats | grep evicted_keys

Expired keys

    It is a good idea to keep an eye on the expirations to make sure Redis is performing as expected.

$ redis-cli INFO stats | grep expired_keys

Keyspace

    The following data could be used for graphing the size of the keyspace as a quick drop or spike in the number of keys is a good indicator of issues.

$ redis-cli INFO keyspace

Workload (connections received, commands processed)

    The following stats are a good indicator of workload on the Redis server.

    $ redis-cli INFO stats | egrep "^total_"

    5.3 Identifying Issues

Besides the data points from INFO, MEMORY, and the latency framework covered in the sections above, you may need to pull data from other sources when troubleshooting.

    Availability

    The Redis server will respond to the PING command when running properly:

    $ redis-cli -h redis.example.com -p 6379 PING
    PONG

    Slow Log

Redis Slow Log is a system to log queries that exceed a specified execution time; this time does not include I/O operations like client communication. It is enabled by default, with two configuration parameters.

    slowlog-log-slower-than 1000000

This indicates that any command with an execution time longer than the given number of microseconds - in this case one second - will be logged. The slow log can be disabled using a value of -1. It can also be set to log every command with a value of 0.

    slowlog-max-len 128

    This sets the length of the slow log. When a new command is logged the oldest one is removed from the queue.

    These values can also be changed at runtime using the CONFIG SET command.
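
For example, to log anything slower than 10 milliseconds and keep the last 256 entries:

127.0.0.1:6379> CONFIG SET slowlog-log-slower-than 10000
OK
127.0.0.1:6379> CONFIG SET slowlog-max-len 256
OK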

    You can view the current length of the slow log using the LEN subcommand:

    redis.cloud:6379> slowlog len
    (integer) 11

    Entries can be pulled off of the slow log using the GET subcommand.

    redis.cloud:6379> slowlog get 2
    1) 1) (integer) 10
    2) (integer) 1616372606
    3) (integer) 600406
    4) 1) "debug"
    2) "sleep"
    3) ".6"
    5) "172.17.0.1:60546"
    6) ""
    2) 1) (integer) 9
    2) (integer) 1616372602
    3) (integer) 600565
    4) 1) "debug"
    2) "sleep"
    3) ".6"
    5) "172.17.0.1:60546"
    6) ""

    The slow log can be reset using the RESET subcommand.

    redis.cloud:6379> slowlog reset
    OK
    redis.cloud:6379> slowlog len
    (integer) 0

    Scanning keys

There are a few options that can be passed to redis-cli that will trigger a keyspace analysis. They use the SCAN command, so they should be safe to run without impacting operations. You can see in the output of each of them that there is a throttling option if needed.


    Big Keys: This option will scan the dataset for big keys and provide information about them.

    $ redis-cli --bigkeys

    # Scanning the entire keyspace to find biggest keys as well as
    # average sizes per key type. You can use -i 0.1 to sleep 0.1 sec
    # per 100 SCAN commands (not usually needed).

    [00.00%] Biggest string found so far '"counter:__rand_int__"' with 6 bytes
    [00.00%] Biggest hash found so far '"myhash"' with 1 fields
    [00.00%] Biggest list found so far '"mylist"' with 200000 items

    -------- summary -------

    Sampled 4 keys in the keyspace!
    Total key length in bytes is 48 (avg len 12.00)

    Biggest list found '"mylist"' has 200000 items
    Biggest hash found '"myhash"' has 1 fields
    Biggest string found '"counter:__rand_int__"' has 6 bytes

    1 lists with 200000 items (25.00% of keys, avg size 200000.00)
    1 hashs with 1 fields (25.00% of keys, avg size 1.00)
    2 strings with 9 bytes (50.00% of keys, avg size 4.50)
    0 streams with 0 entries (00.00% of keys, avg size 0.00)
    0 sets with 0 members (00.00% of keys, avg size 0.00)
    0 zsets with 0 members (00.00% of keys, avg size 0.00)

    Mem Keys: Similarly to big keys, mem keys will look for the biggest keys but also report on the average sizes.

    $ redis-cli --memkeys

    # Scanning the entire keyspace to find biggest keys as well as
    # average sizes per key type. You can use -i 0.1 to sleep 0.1 sec
    # per 100 SCAN commands (not usually needed).

    [00.00%] Biggest string found so far '"counter:__rand_int__"' with 62 bytes
    [00.00%] Biggest string found so far '"key:__rand_int__"' with 63 bytes
    [00.00%] Biggest hash found so far '"myhash"' with 86 bytes
    [00.00%] Biggest list found so far '"mylist"' with 860473 bytes

    -------- summary -------

    Sampled 4 keys in the keyspace!
    Total key length in bytes is 48 (avg len 12.00)

    Biggest list found '"mylist"' has 860473 bytes
    Biggest hash found '"myhash"' has 86 bytes
    Biggest string found '"key:__rand_int__"' has 63 bytes

    1 lists with 860473 bytes (25.00% of keys, avg size 860473.00)
    1 hashs with 86 bytes (25.00% of keys, avg size 86.00)
    2 strings with 125 bytes (50.00% of keys, avg size 62.50)
    0 streams with 0 bytes (00.00% of keys, avg size 0.00)
    0 sets with 0 bytes (00.00% of keys, avg size 0.00)
    0 zsets with 0 bytes (00.00% of keys, avg size 0.00)

Hot Keys: The hot keys scan is only available when the maxmemory-policy is set to volatile-lfu or allkeys-lfu. If you need to identify hot keys you can add this argument to redis-cli.

    $ redis-cli --hotkeys

    # Scanning the entire keyspace to find hot keys as well as
    # average sizes per key type. You can use -i 0.1 to sleep 0.1 sec
    # per 100 SCAN commands (not usually needed).

    [00.00%] Hot key '"key:__rand_int__"' found so far with counter 37

    -------- summary -------

    Sampled 5 keys in the keyspace!
    hot key found with counter: 37 keyname: "key:__rand_int__"

    Monitor: The MONITOR command allows you to see a stream of every command running against your Redis instance.

127.0.0.1:6379> monitor
    OK
    1616541192.039933 [0 127.0.0.1:57070] "PING"
    1616541276.052331 [0 127.0.0.1:57098] "set" "user:2398423hu" "KutMo"

:::caution

Since MONITOR streams back all commands, its use comes at a cost. It has been known to reduce performance by up to 50%, so use it with caution!

:::

    Setting up and using the Redis Log File

    The Redis log file is the other important log you need to be aware of. It contains useful information for troubleshooting configuration and deployment errors. If you don't configure Redis logging, troubleshooting will be significantly harder.

Redis has four logging levels, which you can configure directly in the redis.conf file.

    Log Levels:

    • WARNING
    • NOTICE
    • VERBOSE
    • DEBUG

    Redis also supports sending the log files to a remote logging server through the use of syslog.

Remote logging is important to many security professionals. These remote logging servers are frequently used to monitor security events and manage incidents. Such centralized log servers perform three common functions: ensuring the integrity of your log files, retaining logs for a specific period of time, and correlating logs against other system logs to discover potential attacks on your infrastructure.

    Let's set up logging on our Redis deployment. First we'll open our redis.conf file:

    $ sudo vi /etc/redis/redis.conf

    The redis.conf file has an entire section dedicated to logging.

First, find the logfile directive in the redis.conf file. This will allow you to define the logging directory. For this example, let's use /var/log/redis/redis.log.

    If you'd like to use a remote logging server, then you'll need to uncomment the lines syslog-enabled, syslog-ident and syslog-facility, and ensure that syslog-enabled is set to yes.
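
Put together, the logging portion of redis.conf would then look something like this (syslog-ident redis and syslog-facility local0 are the stock values shipped in the file):

logfile /var/log/redis/redis.log
syslog-enabled yes
syslog-ident redis
syslog-facility local0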

    Next, we'll restart the Redis server.

    You should see the log events indicating that Redis is starting.

    $ sudo tail -f /var/log/redis/redis.log

    And next let's check that we are properly writing to syslog. You should see these same logs.

    $ less /var/log/syslog | grep redis

    Finally, you’ll need to send your logs to your remote logging server to ensure your logs will be backed up to this server. To do this, you’ll also have to modify the rsyslog configuration. This configuration varies depending on your remote logging server provider.


    Ensuring Observability in Redis

    Hello World! Observability


    5.0 Introduction to Observability



    The last thing you want to do after successfully deploying and scaling Redis is to be stuck working on the weekend because performance is down or the service is unavailable!

    If you're running a managed service like Redis Cloud, you won't have to worry about these questions as much. But even then, it's still worthwhile to know about certain key Redis metrics.

Some of the questions you always want to be able to answer include:

    • Is Redis up and running right now?
    • Where is my Redis capacity at?
    • Is Redis accessible at this moment?
    • Is Redis performing the way we expect?
    • When failures occur… what exactly happened to Redis?

    Then of course you must ask...

    • How can I find this out ahead of time?

    Let's dig into these questions and more as we look into observability with Redis.


    2.2 Exercise: Saving a Snapshot

    As we learned in the previous unit, Redis will save a snapshot of your database every hour if at least one key has changed, every five minutes if at least 100 keys have changed, or every 60 seconds if at least 10000 keys have changed.
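
Expressed as redis.conf save directives, that default schedule corresponds to:

save 3600 1
save 300 100
save 60 10000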

    Let’s update this to a simplified hypothetical scenario where we want to save a snapshot if three keys have been modified in 20 seconds.

    Step 1

    Create a directory named 2.2 and in it prepare a redis.conf file.

    $ mkdir 2.2
    $ cd 2.2
    $ vim redis.conf

    The redis.conf file should specify a filename that will be used for the rdb file and a directive that will trigger the creation of a snapshot if 3 keys have been modified in 20 seconds, as described above.

    dbfilename my_backup_file.rdb
    save 20 3

    Step 2

    In the 2.2 directory, start a Redis server - passing it the redis.conf configuration file you just created.

    $ redis-server ./redis.conf

    In a separate terminal tab use the redis-cli to create three random keys, one after the other. For example:

    127.0.0.1:6379> SET a 1
    127.0.0.1:6379> SET b 2
    127.0.0.1:6379> SET c 3

    Run the ls command in the first terminal to list all the files in the 2.2 directory. What changed?

    Step 3

    Now we’re ready to take our persistence a level higher and set up an AOF file. Modify your redis.conf file so that the server will log every new write command and force writing it to disk.

    Be careful! We have a running server and we want this configuration to be applied without restarting it.

    127.0.0.1:6379> CONFIG SET appendonly yes
    127.0.0.1:6379> CONFIG SET appendfsync always

    In order for these settings to be persisted to the redis.conf file we need to save them:

    127.0.0.1:6379> CONFIG REWRITE

    Step 4

    Create a few random keys through redis-cli. Check the contents of the directory 2.2 again. What changed?

    Step 5

    As a final step, restart the Redis server process (you can press Ctrl+C in the terminal to stop the process and re-run it again). If you run the SCAN 0 command you will see that all the keys you created are still in the database, even though we restarted the process.


    Ensuring Persistence & Durability in Redis

    Hello World! Persistence & Durability


    2.0 Introduction to Persistence and Durability



    Hello! Congrats on completing Section 1. Section 2 is a bit shorter but contains some important information on persistence and durability.

    As I am sure you know, Redis serves all data directly from memory. But Redis is also capable of persisting data to disk. Persistence preserves data in the event of a server restart.

    In the following video and exercise, we'll look at the options for persisting data to disk. We'll show you how to enable persistence, and you'll then do a hands-on exercise setting up snapshots of your Redis instance.

    Good luck, and we'll see you in the next sections.


    2.1 Persistence options in Redis



If a Redis server that only stores data in RAM is restarted, all data is lost. To prevent such data loss, there needs to be some mechanism for persisting the data to disk; Redis provides two of them: snapshotting and an append-only file, or AOF. You can configure your Redis instances to use either of the two, or a combination of both.

    When a snapshot is created, the entire point-in-time view of the dataset is written to persistent storage in a compact .rdb file. You can set up recurring backups, for example every 1, 12, or 24 hours and use these backups to easily restore different versions of the data set in case of disasters. You can also use these snapshots to create a clone of the server, or simply leave them in place for a future restart.

    Creating a .rdb file requires a lot of disk I/O. If performed in the main Redis process, this would reduce the server’s performance. That’s why this work is done by a forked child process. But even forking can be time-consuming if the dataset is large. This may result in decreased performance or in Redis failing to serve clients for a few milliseconds or even up to a second for very large datasets. Understanding this should help you decide whether this solution makes sense for your requirements.

You can configure the name and location of the .rdb file with the dbfilename and dir configuration directives, either through the redis.conf file, or through redis-cli as explained in Section 1 Unit 2. And of course you can configure how often you want to create a snapshot. Here’s an excerpt from the redis.conf file showing these directives.
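As a sketch, the relevant directives look like this (the save rule shown matches the example described next; the defaults shipped with your Redis version may differ):

# Snapshotting configuration (illustrative values)
dbfilename dump.rdb
dir ./
save 60 1000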

As an example, this configuration will make Redis automatically dump the dataset to disk every 60 seconds if at least 1000 keys changed in that period. While snapshotting is a great strategy for the use cases explained above, it leaves a window for significant data loss. You can configure snapshots to run every few minutes, or after X writes against the database, but if the server crashes you lose all the writes since the last snapshot was taken. In many use cases that kind of data loss is acceptable, but in many others it is absolutely not. For all of those other use cases Redis offers the AOF persistence option.

AOF, or append-only file, works by logging every incoming write command to disk as it happens. These commands can then be replayed at server startup to reconstruct the original dataset. Commands are logged using the same format as the Redis protocol itself, in an append-only fashion. The AOF approach provides greater durability than snapshotting, and allows you to configure how often file syncs happen.

    Depending on your durability requirements (or how much data you can afford to lose), you can choose which fsync policy is the best for your use case:

• fsync every write: The safest policy. The write is acknowledged to the client only after it has been written to the AOF file and flushed to disk. Since we are writing to disk synchronously, we can expect much higher latency than usual.
• fsync every second: The default policy. Fsync is performed asynchronously, in a background thread, so write performance is still high. Choose this option if you need high performance and can afford to lose up to one second’s worth of writes.
• no fsync: In this case Redis will log the command to the file descriptor, but will not force the OS to flush the data to disk. If the OS crashes we can lose a few seconds of data. (Normally Linux will flush data every 30 seconds with this configuration, but the exact timing depends on the kernel’s tuning.)

The AOF contains a log of all the operations that modified the database, in a format that’s easy to understand and parse. When the file gets too big, Redis can automatically rewrite it in the background, compacting it so that only the latest state of the data is preserved. The relevant configuration directives for AOF are shown below.
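A minimal sketch of those directives as they appear in redis.conf (values shown are the usual defaults):

appendonly yes
appendfilename "appendonly.aof"
appendfsync everysec
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb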


    4.1 Exercise - Creating a Redis Cluster

    Step 1

    To create a cluster, we need to spin up a few empty Redis instances and configure them to run in cluster mode.

    Here’s a minimal configuration file for Redis Cluster:

    # redis.conf file
    port 7000
    cluster-enabled yes
    cluster-config-file nodes.conf
    cluster-node-timeout 5000
    appendonly yes

On the first line we specify the port on which the server should run; then we state that we want the server to run in cluster mode, with the cluster-enabled yes directive. cluster-config-file defines the name of the file where the configuration for this node is stored, in case of a server restart. Finally, cluster-node-timeout is the number of milliseconds a node must be unreachable for it to be considered to be in a failure state.

    Step 2

    Let’s create a cluster on your localhost with three primary shards and three replicas (remember, in production always use two replicas to protect against a split-brain situation). We’ll need to bring up six Redis processes and create a redis.conf file for each of them, specifying their port and the rest of the configuration directives above.

    First, create six directories:

    mkdir -p {7000..7005}

    Step 3

    Then create the minimal configuration redis.conf file from above in each one of them, making sure you change the port directive to match the directory name.

    To copy the initial redis.conf file to each folder, run the following:

    for i in {7000..7005}; do cp redis.conf $i; done

    You should end up with the following directory structure:

- 7000
  - redis.conf
- 7001
  - redis.conf
- 7002
  - redis.conf
- 7003
  - redis.conf
- 7004
  - redis.conf
- 7005
  - redis.conf

    Step 4

    Open six terminal tabs and start the servers by going into each one of the directories and starting a Redis instance:

    # Terminal tab 1
    cd 7000
    /path/to/redis-server ./redis.conf
    # Terminal tab 2
    cd 7001
    /path/to/redis-server ./redis.conf
    ... and so on.

    Step 5

    Now that you have six empty Redis servers running, you can join them in a cluster:

    redis-cli --cluster create 127.0.0.1:7000 127.0.0.1:7001 \
    127.0.0.1:7002 127.0.0.1:7003 127.0.0.1:7004 127.0.0.1:7005 \
    --cluster-replicas 1

Here we list the IP addresses and ports of all six servers and use the create command to instruct Redis to join them in a cluster, creating one replica for each primary. redis-cli will propose a configuration; accept it by typing yes. The cluster will then be configured and joined, meaning the instances will be bootstrapped into talking with each other.

    Finally, you should see a message saying:

    [OK] All 16384 slots covered

This means that there is at least one master instance serving each of the 16384 available hash slots.

    Step 6

    Let’s add a new shard to the cluster, which is something you might do when you need to scale.

    First, as before, we need to start two new empty Redis instances (primary and its replica) in cluster mode. We create new directories 7006 and 7007 and in them we copy the same redis.conf file we used before, making sure we change the port directive in them to the appropriate port (7006 and 7007).

    $ mkdir 7006 7007
    $ cp 7000/redis.conf 7006/redis.conf
    $ cp 7000/redis.conf 7007/redis.conf

    Update the port numbers in the files ./7006/redis.conf and ./7007/redis.conf to 7006 and 7007, respectively.

    Step 7

    Let’s start the Redis instances:

    # Terminal tab 7
    $ cd 7006
    $ redis-server ./redis.conf
    # Terminal tab 8
    $ cd 7007
    $ redis-server ./redis.conf

    Step 8

    In the next step we join the new primary shard to the cluster with the add-node command. The first parameter is the address of the new shard, and the second parameter is the address of any of the current shards in the cluster.

    redis-cli --cluster add-node 127.0.0.1:7006 127.0.0.1:7000
    note

    The Redis commands use the term “Nodes” for what we call “Shards” in this training, so a command named “add-node” would mean “add a shard”.

    Step 9

Finally, we need to join the new replica shard using the same add-node command, with a few extra arguments indicating that the shard is joining as a replica and which primary shard it will replicate. If we don’t specify a primary shard, Redis will assign one itself.

    We can find the IDs of our shards by running the cluster nodes command on any of the shards:

    $ redis-cli -p 7000 cluster nodes
    46a768cfeadb9d2aee91ddd882433a1798f53271 127.0.0.1:7006@17006 master - 0 1616754504000 0 connected
    1f2bc068c7ccc9e408161bd51b695a9a47b890b2 127.0.0.1:7003@17003 slave a138f48fe038b93ea2e186e7a5962fb1fa6e34fa 0 1616754504551 3 connected
    5b4e4be56158cf6103ffa3035024a8d820337973 127.0.0.1:7001@17001 master - 0 1616754505584 2 connected 5461-10922
    a138f48fe038b93ea2e186e7a5962fb1fa6e34fa 127.0.0.1:7002@17002 master - 0 1616754505000 3 connected 10923-16383
    71e078dab649166dcbbcec51520742bc7a5c1992 127.0.0.1:7005@17005 slave 5b4e4be56158cf6103ffa3035024a8d820337973 0 1616754505584 2 connected
    f224ecabedf39d1fffb34fb6c1683f8252f3b7dc 127.0.0.1:7000@17000 myself,master - 0 1616754502000 1 connected 0-5460
    04d71d5eb200353713da475c5c4f0a4253295aa4 127.0.0.1:7004@17004 slave f224ecabedf39d1fffb34fb6c1683f8252f3b7dc 0 1616754505896 1 connected

The port of the primary shard we added in the last step was 7006, and we can see it on the first line. Its ID is 46a768cfeadb9d2aee91ddd882433a1798f53271.

    The resulting command is:

    $ redis-cli -p 7000 --cluster add-node 127.0.0.1:7007 127.0.0.1:7000 --cluster-slave --cluster-master-id 46a768cfeadb9d2aee91ddd882433a1798f53271

The --cluster-slave flag indicates that the shard should join as a replica, and --cluster-master-id 46a768cfeadb9d2aee91ddd882433a1798f53271 specifies which primary shard it should replicate.

    Step 10

Now our cluster has eight shards (four primaries and four replicas), but if we run the cluster slots command we’ll see that the newly added shards don’t host any hash slots, and therefore no data. Let’s assign some hash slots to them:

$ redis-cli -p 7000 --cluster reshard 127.0.0.1:7000

    We use the command reshard and the address of any shard in the cluster as an argument here. In the next step we’ll be able to choose the shards we’ll be moving slots from and to.

The first question you’ll get is about the number of slots you want to move. We have 16384 slots in total and four primary shards, so let’s move a quarter of all slots, so that the data is distributed equally: 16384 ÷ 4 = 4096, so let’s use that number.

The next question asks for the receiving node ID: the ID of the primary shard we want to move the data to, which we learned how to get in the previous step with the cluster nodes command.

    Finally, we need to enter the IDs of the shards we want to copy data from. Alternatively, we can type “all” and the shard will move a number of hash slots from all available primary shards.

    $ redis-cli -p 7000 --cluster reshard 127.0.0.1:7000
    ....
    ....
    ....

    How many slots do you want to move (from 1 to 16384)? 4096
    What is the receiving node ID? 46a768cfeadb9d2aee91ddd882433a1798f53271
    Please enter all the source node IDs.
    Type 'all' to use all the nodes as source nodes for the hash slots.
    Type 'done' once you entered all the source nodes IDs.
    Source node #1: all

    Ready to move 4096 slots.
    Source nodes:
    M: f224ecabedf39d1fffb34fb6c1683f8252f3b7dc 127.0.0.1:7000
    slots:[0-5460] (5461 slots) master
    1 additional replica(s)
    M: 5b4e4be56158cf6103ffa3035024a8d820337973 127.0.0.1:7001
    slots:[5461-10922] (5462 slots) master
    1 additional replica(s)
    M: a138f48fe038b93ea2e186e7a5962fb1fa6e34fa 127.0.0.1:7002
    slots:[10923-16383] (5461 slots) master
    1 additional replica(s)
    Destination node:
    M: 46a768cfeadb9d2aee91ddd882433a1798f53271 127.0.0.1:7006
    slots: (0 slots) master
    1 additional replica(s)
    Resharding plan:
    Moving slot 5461 from 5b4e4be56158cf6103ffa3035024a8d820337973
    Moving slot 5462 from 5b4e4be56158cf6103ffa3035024a8d820337973

    Do you want to proceed with the proposed reshard plan (yes/no)?
    Moving slot 5461 from 127.0.0.1:7001 to 127.0.0.1:7006:
    Moving slot 5462 from 127.0.0.1:7001 to 127.0.0.1:7006:
    Moving slot 5463 from 127.0.0.1:7001 to 127.0.0.1:7006:
    ....
    ....
    ....

    Once the command finishes we can run the cluster slots command again and we’ll see that our new primary and replica shards have been assigned some hash slots:

    $ redis-cli -p 7000 cluster slots

    Ensuring Scalability in Redis

    Hello World! Scalability


    4.0 Clustering In Redis


    Profile picture for Justin Castilla
    Author:
    Justin Castilla, Senior Developer Advocate at Redis



    Before we jump into the details, let's first address the elephant in the room: DBaaS offerings, or "database-as-a-service" in the cloud. No doubt, it's useful to know how Redis scales and how you might deploy it. But deploying and maintaining a Redis cluster is a fair amount of work. So if you don't want to deploy and manage Redis yourself, then consider signing up for Redis Cloud, our managed service, and let us do the scaling for you. Of course, that route is not for everyone. And as I said, there's a lot to learn here, so let's dive in.

    We'll start with scalability. Here's one definition:

    “Scalability is the property of a system to handle a growing amount of work by adding resources to the system.” Wikipedia

The two most common scaling strategies are vertical scaling and horizontal scaling. Vertical scaling, also called “scaling up”, means adding more resources like CPU or memory to your server. Horizontal scaling, or “scaling out”, means adding more servers to your pool of resources. It’s the difference between just getting a bigger server and deploying a whole fleet of servers.

    Let's take an example. Suppose you have a server with 128 GB of RAM, but you know that your database will need to store 300 GB of data. In this case, you’ll have two choices: you can either add more RAM to your server so it can fit the 300GB dataset, or you can add two more servers and split the 300GB of data between the three of them. Hitting your server’s RAM limit is one reason you might want to scale up, or out, but reaching the performance limit in terms of throughput, or operations per second, is also an indicator that scaling is necessary.

Since Redis is mostly single-threaded, it cannot make use of your server’s multiple CPU cores for command processing. But if we split the data between two Redis servers, our system can process requests in parallel, nearly doubling the throughput. In fact, performance will scale close to linearly as more Redis servers are added to the system. This database architectural pattern of splitting data between multiple servers for the purpose of scaling is called sharding. The resulting servers that hold chunks of the data are called shards.

    This performance increase sounds amazing, but it doesn’t come without some cost: if we divide and distribute our data across two shards, which are just two Redis server instances, how will we know where to look for each key? We need to have a way to consistently map a key to a specific shard. There are multiple ways to do this and different databases adopt different strategies. The one Redis chose is called “Algorithmic sharding” and this is how it works:

    In order to find the shard on which a key lives we compute a numeric hash value out of the key name and modulo divide it by the total number of shards. Because we are using a deterministic hash function the key “foo” will always end up on the same shard, as long as the number of shards stays the same.

    But what happens if we want to increase our shard count even further, a process commonly called resharding? Let’s say we add one new shard so that our total number of shards is three. When a client tries to read the key “foo” now, they will run the hash function and modulo divide by the number of shards, as before, but this time the number of shards is different and we’re modulo dividing with three instead of two. Understandably, the result may be different, pointing us to the wrong shard!

    Resharding is a common issue with the algorithmic sharding strategy and can be solved by rehashing all the keys in the keyspace and moving them to the shard appropriate to the new shard count. This is not a trivial task, though, and it can require a lot of time and resources, during which the database will not be able to reach its full performance or might even become unavailable.

    Redis chose a very simple approach to solving this problem: it introduced a new, logical unit that sits between a key and a shard, called a hash slot.

One shard can contain many hash slots, and a hash slot contains many keys. The total number of hash slots in a database is always 16384 (16K). This time the modulo division is done not with the number of shards, but with the number of hash slots, which stays the same even when resharding; the result gives us the position of the hash slot where the key we’re looking for lives. And when we do need to reshard, we simply move hash slots from one shard to another, distributing the data as required across the different Redis instances.
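In pseudocode, the mapping is:

HASH_SLOT = CRC16(key) mod 16384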

    Now that we know what sharding is and how it works in Redis, we can finally introduce Redis Cluster. Redis Cluster provides a way to run a Redis installation where data is automatically split across multiple Redis servers, or shards. Redis Cluster also provides high availability. So, if you're deploying Redis Cluster, you don't need (or use) Redis Sentinel.

    Redis Cluster can detect when a primary shard fails and promote a replica to a primary without any manual intervention from the outside. How does it do it? How does it know that a primary shard has failed, and how does it promote its replica to be the new primary shard? We need to have replication enabled. Say we have one replica for every primary shard. If all our data is divided between three Redis servers, we would need a six-member cluster, with three primary shards and three replicas.

All six shards are connected to each other over TCP, and they constantly PING each other and exchange messages using a binary protocol. These messages contain information about which shards have responded with a PONG, and are therefore considered alive, and which haven’t.

    When enough shards report that a certain primary shard is not responding to them, they can agree to trigger a failover and promote the shard’s replica to become the new primary. How many shards need to agree that a shard is offline before a failover is triggered? Well, that’s configurable and you can set it up when you create a cluster, but there are some very important guidelines that you need to follow.

If you have an even number of shards in the cluster, say six, and there’s a network partition that divides the cluster in two, you’ll end up with two groups of three shards. The group on the left side will not be able to talk to the shards in the group on the right side, so the cluster will think those shards are offline and will trigger a failover of any primary shards among them, resulting in a left side with all primary shards. On the right side, the three shards will see the shards on the left as offline, and will trigger a failover of any primary shard that was on the left side, resulting in a right side with all primary shards. Both sides, thinking they have all the primaries, will continue to receive client requests that modify data, and that is a problem: maybe client A sets the key “foo” to “bar” on the left side, while client B sets the same key’s value to “baz” on the right side.

When the network partition is resolved and the shards try to rejoin, we will have a conflict, because we have two shards holding different data, each claiming to be the primary, and we wouldn’t know which data is valid.

This is called a split-brain situation, and it is a very common issue in the world of distributed systems. A popular solution is to always keep an odd number of shards in your cluster, so that when you get a network split, the left and right groups will do a count and see whether they are in the bigger or the smaller group (also called the majority or minority). If they are in the minority, they will not try to trigger a failover and will not accept any client write requests.

    Here's the bottom line: to prevent split-brain situations in Redis Cluster, always keep an odd number of primary shards and two replicas per primary shard.


    4.2 Using Redis-CLI with a Redis Cluster

    When you use redis-cli to connect to a shard of a Redis Cluster, you are connected to that shard only, and cannot access data from other shards. If you try to access keys from the wrong shard, you will get a MOVED error.

There is a trick you can use with redis-cli so you don’t have to open connections to all the shards; instead, you let it do the connect-and-reconnect work for you. It’s the redis-cli cluster support mode, triggered by the -c switch:

    $ redis-cli -p 7000 -c

    When in cluster mode, if the client gets an (error) MOVED 15495 127.0.0.1:7002 error response from the shard it’s connected to, it will simply reconnect to the address returned in the error response, in this case 127.0.0.1:7002.

    Now it’s your turn: use redis-cli cluster mode to connect to your cluster and try accessing keys in different shards. Observe the response messages.
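A session might look something like this (the key, slot number, and addresses are illustrative):

$ redis-cli -p 7000 -c
127.0.0.1:7000> SET foo bar
-> Redirected to slot [12182] located at 127.0.0.1:7002
OK
127.0.0.1:7002> GET foo
"bar"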


    4.3 Redis Cluster and Client Libraries

To use a client library with Redis Cluster, the library needs to be cluster-aware. Clients that support Redis Cluster typically feature a special connection module for managing connections to the cluster. The process that the better client libraries follow usually goes like this:

    The client connects to any shard in the cluster and gets the addresses of the rest of the shards. The client also fetches a mapping of hash slots to shards so it can know where to look for a key in a specific hash slot. This hash slot map is cached locally.


    Hash Slot Diagram

    When the client needs to read/write a key, it first runs the hashing function (crc16) on the key name and then modulo divides by 16384, which results in the key’s hash slot number.

    In the example below the hash slot number for the key “foo” is 12182. Then the client checks the hash slot number against the hash slot map to determine which shard it should connect to. In our example, the hash slot number 12182 lives on shard 127.0.0.1:7002.
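You can verify the slot computation yourself with the CLUSTER KEYSLOT command:

127.0.0.1:7000> CLUSTER KEYSLOT foo
(integer) 12182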

    Finally, the client connects to the shard and finds the key it needs to work with.


    Hash Slot Map Diagram

    If the topology of the cluster changes for any reason and the key has been moved, the shard will respond with an (error) MOVED 15495 127.0.0.1:7006 error, returning the address of the new shard responsible for that key. This indicates to the client that it needs to re-query the cluster for its topology and hash slot allocation, so it will do that and update its local hash slot map for future queries.

    Not every client library has this extra logic built in, so when choosing a client library, make sure to look for ones with cluster support.

Another detail to check is whether the client stores the hash slot map locally. If it doesn’t, and instead relies on the (error) MOVED response to get the address of the right shard, you can expect much higher latency than usual, because the client may have to make two network requests instead of one for a large share of requests.

Examples of clients that support Redis Cluster:

    • Java: Jedis
    • .NET: StackExchange.Redis
    • Go: Radix, go-redis/redis
    • Node.js: node-redis, ioredis
    • Python: redis-py

    Here's a list of Redis Clients: https://redis.io/clients

1.4 Client Performance Improvements

Connection Pooling

With connection pooling, the client library will instantiate a series of (persistent) connections to the Redis server and keep them open. When the application needs to send a request, the current thread will get one of these connections from the pool, use it, and return it when done.


    Connection Pool diagram
So, if possible, always try to choose a client library that supports connection pooling, because that decision alone can have a huge influence on your system’s performance.

    Pipelining

As in any client-server application, Redis can handle many clients simultaneously. Each client does a (typically blocking) read on a socket and waits for the server response. The server reads the request from the socket, parses it, processes it, and writes the response to the socket. The time the data packets take to travel from the client to the server, and then back again, is called network round trip time, or RTT. If, for example, you needed to execute 50 commands, you would have to send a request and wait for the response 50 times, paying the RTT cost every single time. To tackle this problem, Redis can process new requests even if the client hasn’t yet read the old responses. This way, you can send multiple commands to the server without waiting for the replies at all; the replies are read at the end, in a single step.


    Pipelining diagram
    This technique is called pipelining and is another good way to improve the performance of your system. Most Redis libraries support this technique out of the box.
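As a quick illustration from the command line, redis-cli can pipeline a batch of commands for you with its --pipe option (the key names here are made up):

$ printf 'SET k1 v1\r\nSET k2 v2\r\nINCR counter\r\n' | redis-cli --pipe
All data transferred. Waiting for the last reply...
Last reply received from server.
errors: 0, replies: 3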

    Supplemental Reading:


    1.1 The Command Line Tool: Redis-CLI



    Redis-cli is a command line tool used to interact with the Redis server. Most package managers include redis-cli as part of the redis package. It can also be compiled from source, and you'll find the source code in the Redis repository on GitHub.

There are two ways to use redis-cli:

• an interactive mode, where the user types commands and sees the replies;
• a command mode, where the command is provided as an argument to redis-cli, executed, and the results sent to standard output.

Let’s use the CLI to connect to a Redis server running at 172.22.0.3, port 7000. The arguments -h and -p specify the host and port to connect to. They can be omitted if your server is running on the default host (localhost) and port (6379).
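For example (assuming a server is actually listening at that address):

# Interactive mode: type commands at the prompt
$ redis-cli -h 172.22.0.3 -p 7000
172.22.0.3:7000> PING
PONG

# Command mode: pass the command as arguments
$ redis-cli -h 172.22.0.3 -p 7000 PING
PONG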

redis-cli provides some useful productivity features. For example, you can scroll through your command history by pressing the up and down arrow keys. You can also use the TAB key to autocomplete a command, saving even more keystrokes. Just type the first few letters of a command and keep pressing TAB until the command you want appears on screen.

    Once you have the command name you want, the CLI will display syntax hints about the arguments so you don’t have to remember all of them, or open up the Redis command documentation.

    These three tips can save you a lot of time and take you a step closer to being a power user.

You can do much more with redis-cli, like sending output to a file, scanning for big keys, getting continuous stats, monitoring commands, and so on. For a much more detailed explanation, refer to the documentation.


    1.2 Configuring a Redis Server

The self-documented Redis configuration file, redis.conf, has been mentioned many times as an example of well-written documentation. In it you can find all the possible Redis configuration directives, together with a detailed description of what they do and their default values.

When running Redis in production, you should always adjust the redis.conf file to your needs and instruct Redis to run based on its parameters.

    The way to do that is by providing the path to the file when starting up your server:

$ redis-server ./path/to/redis.conf

    When you’re only starting a Redis server instance for testing purposes you can pass configuration directives directly on the command line:

    $ redis-server --port 7000 --replicaof 127.0.0.1:6379

    The format of the arguments passed via the command line is exactly the same as the one used in the redis.conf file, with the exception that the keyword is prefixed with --.

    Note that internally this generates an in-memory temporary config file where arguments are translated into the format of redis.conf.

    It is possible to reconfigure a running Redis server without stopping or restarting it by using the special commands CONFIG SET and CONFIG GET.

    127.0.0.1:6379> CONFIG GET *

127.0.0.1:6379> CONFIG SET parameter value

    127.0.0.1:6379> CONFIG REWRITE

Not all configuration directives are supported in this way, but you can check the output of the command CONFIG GET * first for a list of all the supported ones.
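For example, inspecting and changing the maxmemory directive at runtime looks like this:

127.0.0.1:6379> CONFIG GET maxmemory
1) "maxmemory"
2) "0"
127.0.0.1:6379> CONFIG SET maxmemory 100mb
OK
127.0.0.1:6379> CONFIG GET maxmemory
1) "maxmemory"
2) "104857600"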

    tip

Modifying the configuration on the fly has no effect on the redis.conf file; at the next restart of Redis, the old configuration will be used instead. If you want to force-update the redis.conf file with your current configuration settings, you can run the CONFIG REWRITE command, which will automatically scan your redis.conf file and update the fields that don’t match the current configuration values.


    Talking to Redis


    Profile picture for Justin Castilla
    Author:
    Justin Castilla, Senior Developer Advocate at Redis


    1.5 Initial Tuning

    We love Redis because it’s fast (and fun!), so as we begin to consider scaling out Redis, we first want to make sure we've done everything we can to maximize its performance.

    Let's start by looking at some important tuning parameters.

    Max Clients

Redis has a default limit of 10,000 clients; after that maximum has been reached, Redis will respond to all new connections with an error. If you have a lot of connections (or a lot of application instances), you may need to go higher. You can set the maximum number of simultaneous clients in the Redis config file:

    maxclients 20000

    Max Memory

By default, Redis has no max memory limit, so it will use all available system memory. If you are using replication, you will want to limit the memory usage in order to leave headroom for replica output buffers. It’s also a good idea to leave memory for the system; something like 25% overhead. You can update this setting in the Redis config file:

    # memory size in bytes
    maxmemory 1288490188

    Set TCP-BACKLOG

    The Redis server uses the value of tcp-backlog to specify the size of the complete connection queue.

    Redis passes this configuration as the second parameter of the listen(int s, int backlog) call.

If you have many connections, you will need to set this higher than the default of 511. You can update this in the Redis config file:

    # TCP listen() backlog.
    #
    # In high requests-per-second environments you need an high backlog in order
    # to avoid slow clients connections issues. Note that the Linux kernel
    # will silently truncate it to the value of /proc/sys/net/core/somaxconn so
    # make sure to raise both the value of somaxconn and tcp_max_syn_backlog
    # in order to get the desired effect.
    tcp-backlog 65536

    As the comment in redis.conf indicates, the value of somaxconn and tcp_max_syn_backlog may need to be increased at the OS level as well.

    Set Read Replica Configurations

One simple way to scale Redis is to add read replicas and take load off the primary. This is most effective when you have a read-heavy (as opposed to write-heavy) workload. You will probably want the replica to be available and still serving stale data, even when replication is not yet complete. You can update this in the Redis config (on Redis 5 and newer, these directives are also available under the names replica-serve-stale-data and replica-read-only):

    slave-serve-stale-data yes

    You will also want to prevent any writes from happening on the replicas. You can update this in the Redis config:

    slave-read-only yes

    Kernel Memory

    Under high load, occasional performance dips can occur due to memory allocation. This is something Salvatore, the creator of Redis, blogged about in the past. The performance issue is related to transparent hugepages, which you can disable at the OS level if needed.

    $ echo 'never' > /sys/kernel/mm/transparent_hugepage/enabled

    Kernel Network Stack

    If you plan on handling a large number of connections in a high performance environment, we recommend tuning the following kernel parameters:

    vm.swappiness=0                       # turn off swapping
    net.ipv4.tcp_sack=1 # enable selective acknowledgements
    net.ipv4.tcp_timestamps=1 # needed for selective acknowledgements
    net.ipv4.tcp_window_scaling=1 # scale the network window
    net.ipv4.tcp_congestion_control=cubic # better congestion algorithm
    net.ipv4.tcp_syncookies=1 # enable syn cookies
    net.ipv4.tcp_tw_recycle=1 # recycle sockets quickly
    net.ipv4.tcp_max_syn_backlog=NUMBER # backlog setting
    net.core.somaxconn=NUMBER # up the number of connections per port
    net.core.rmem_max=NUMBER # up the receive buffer size
    net.core.wmem_max=NUMBER # up the buffer size for all connections
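These are kernel settings, so you would typically persist them in /etc/sysctl.conf (or a drop-in file under /etc/sysctl.d/) and apply them with sysctl; for example:

# apply one setting immediately
$ sudo sysctl -w net.core.somaxconn=65536
# reload all settings from /etc/sysctl.conf
$ sudo sysctl -p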

    File Descriptor Limits

If you do not set the correct number of file descriptors for the Redis user, you will see errors indicating that “Redis can’t set maximum open files...”. You can increase the file descriptor limit at the OS level.

    Here's an example on Ubuntu using systemd:

    /etc/systemd/system/redis.service
    [Service]
    ...
    User=redis
    Group=redis
    ...
    LimitNOFILE=65536
    ...

You will then need to reload the systemd daemon and restart the redis service.
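With systemd, that is (assuming the unit is named redis.service as above):

$ sudo systemctl daemon-reload
$ sudo systemctl restart redis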

    Enabling RPS (Receive Packet Steering) and CPU preferences

    One way we can improve performance is to prevent Redis from running on the same CPUs as those handling any network traffic. This can be accomplished by enabling RPS for our network interfaces and creating some CPU affinity for our Redis process.

    Here is an example. First we can enable RPS on CPUs 0-1:

    $ echo '3' > /sys/class/net/eth1/queues/rx-0/rps_cpus

    Then we can set the CPU affinity for redis to CPUs 2-8:

    # config is set to write pid to /var/run/redis.pid
    $ taskset -pc 2-8 `cat /var/run/redis.pid`
    pid 8946's current affinity list: 0-8
    pid 8946's new affinity list: 2-8

    1.3 Redis Clients



    Redis has a client-server architecture and uses a request-response model. Applications send requests to the Redis server, which processes them and returns responses for each. The role of a Redis client library is to act as an intermediary between your application and the Redis server.

    Client libraries perform the following duties:

    • Implement the Redis wire protocol - the format used to send requests to and receive responses from the Redis server
    • Provide an idiomatic API for using Redis commands from a particular programming language

    Managing the connection to Redis

    Redis clients communicate with the Redis server over TCP, using a protocol called RESP (REdis Serialization Protocol) designed specifically for Redis.

The RESP protocol is simple and text-based, so it is easily read by humans as well as machines. A common request/response exchange looks something like the one below. Note that we’re using netcat here to send the raw protocol:
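A minimal sketch of such an exchange (the key and value are illustrative; the +OK and $11 lines are the server’s RESP replies):

$ nc localhost 6379
SET greeting "Hello World"
+OK
GET greeting
$11
Hello World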

This simple, well-documented protocol has resulted in Redis clients for almost every language you can think of. The redis.io client page lists over 200 client libraries for more than 50 programming languages.


    1.0 Redis Server Overview



As you might already know, Redis is an open source data structure server written in C. You can store multiple data types, like strings, hashes, and streams, and access them by a unique key name.

    For example, if you have a string value “Hello World” saved under the key name “greeting”, you can access it by running the GET command followed by the key name - greeting. All keys in a Redis database are stored in a flat keyspace. There is no enforced schema or naming policy, and the responsibility for organizing the keyspace is left to the developer.
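In redis-cli, that looks like this:

127.0.0.1:6379> SET greeting "Hello World"
OK
127.0.0.1:6379> GET greeting
"Hello World"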

    The speed Redis is famous for is mostly due to the fact that Redis stores and serves data entirely from RAM instead of disk, as most other databases do. Another contributing factor is its predominantly single-threaded nature: single-threading avoids race conditions and CPU-heavy context switching associated with threads.

    Indeed, this means that open source Redis can’t take advantage of the processing power of multiple CPU cores, although CPU is rarely the bottleneck with Redis. You are more likely to bump up against memory or network limitations before hitting any CPU limitations. That said, Redis Enterprise does let you take advantage of all of the cores on a single machine.

    Let’s now look at exactly what happens behind the scenes with every Redis request. When a client sends a request to a Redis server, the request is first read from the socket, then parsed and processed and finally, the response is written back to the socket and sent to the user. The reading and especially writing to a socket are expensive operations, so in Redis version 6.0 multi-threaded I/O was introduced. When this feature is enabled, Redis can delegate the time spent reading and writing to I/O sockets over to other threads, freeing up cycles for storing and retrieving data and boosting overall performance by up to a factor of two for some workloads.

    Throughout the rest of the section, you’ll learn how to use the Redis command-line interface, how to configure your Redis server, and how to choose and tune your Redis client library.


    Redis Insiders, our ambassador program


    Profile picture for Suze Shardlow
    Author:
    Suze Shardlow, Developer Community Manager at Redis

    Redis Insiders is our first community ambassador program, launched in May 2022.

    Our Redis Insiders are enthusiastic community members who love using Redis, and helping others to use it. They spread the word through technical talks, blog posts, live streams and more!

    Meet our Redis Insiders

    Our current Insiders are...

    Jyotsna Gupta

    Jyotsna Gupta headshot

    Jyotsna is based in Bangalore, India and works as a Senior Software Engineer at Gojek (GoPay). She is an open source enthusiast and has been using Redis for the past three years, with Golang and Java. She applied to become a Redis Insider because she is passionate about communities and loves to share and learn together. When she’s not working or volunteering, she energises herself by playing badminton, table tennis, basketball, Carrom and chess.

    Find out more about Jyotsna.

    Moiz Kapasi

    Moiz Kapasi headshot

    Moiz Kapasi is a Paris, France-based Solution Architect at Capgemini who builds apps in the enterprise landscape of a major European car manufacturer, using Java / J2EE. Moiz was drawn to the Redis Insiders program because, since he started using Redis 1.5 years ago, the simplicity and power of Redis has fascinated him. His hobbies include cycling along the River Seine, camping, reading classic literature and philately.

    Find out more about Moiz.

    Michael Owolabi

    Michael Owolabi headshot

    Michael is a Senior Software Engineer at Spleet, from Lagos, Nigeria. He is a JavaScript programmer and has been using Redis for more than two years. In his spare time, Michael enjoys travelling, adventure, writing, and volunteering. Michael applied to become a Redis Insider because as a lover of Redis himself, he wanted an opportunity to meet with and learn from other Redis professionals around the globe and also share his knowledge of Redis through speaking and writing.

    Find out more about Michael.

    Stevan Thomas

    Stevan Thomas headshot

    Stevan is a Senior Software Engineer at Vela, with five years of Redis experience. He builds web, mobile and desktop apps for a variety of industries including shipping / logistics, finance, retail and health using JavaScript, Swift, Java, C# and Python. Stevan lives in Port of Spain, Trinidad and Tobago and, in his spare time, enjoys hiking, fitness / CrossFit, watching movies and learning new technologies. He is excited to join the Redis Insiders program because he wants to be a positive influence in the adoption of Redis as a primary database for developers.

    Find out more about Stevan.


    Jyotsna Gupta, Redis Insider


    Profile picture for Suze Shardlow
    Author:
    Suze Shardlow, Developer Community Manager at Redis
    Profile picture for Jyotsna Gupta
    Author:
    Jyotsna Gupta, Redis Insider

    Jyotsna Gupta headshot

    "I am an Open Source Enthusiast, working as a Senior Software Engineer at Gojek (GoPay). I am based out of Bangalore, India. I have been using Redis for the last 3 years.

    "I applied to become a Redis Insider because I am highly passionate about communities and love to share and learn together, and I am currently trying to get the expertise in Redis at my current workplace.

    "Apart from my work and volunteering, I energize myself by playing badminton, table tennis, Carrom, chess and basketball. I prefer to spend most of the time in my room, else I feel to travel the world. If you don’t see me doing any of the above, then you’ll find probably find me sleeping and dreaming for hours, maybe days?"

    Find Jyotsna online at:


    Michael Owolabi, Redis Insider


    Profile picture for Suze Shardlow
    Author:
    Suze Shardlow, Developer Community Manager at Redis
    Profile picture for Michael Owolabi
    Author:
    Michael Owolabi, Redis Insider

    Michael Owolabi headshot

    "I am a Senior Software Engineer at Spleet and have been using Redis for 2+ years. I build software products using Node.js."

    "I am based in Lagos, Nigeria, and, in my spare time, I enjoy traveling, adventure, writing and volunteering.

    "I applied to become a Redis Insider because as a lover of Redis myself, I wanted an opportunity to meet with and learn from other Redis professionals around the globe and also share my knowledge of Redis through speaking and writing."

    Find Michael online at:


    Moiz Kapasi, Redis Insider


    Profile picture for Suze Shardlow
    Author:
    Suze Shardlow, Developer Community Manager at Redis
    Profile picture for Moiz Kapasi
    Author:
    Moiz Kapasi, Redis Insider

    Moiz Kapasi headshot

    "I am a Solution Architect at Capgemini and have been using Redis for 1.5 years. I build/manage/enhance various applications in the enterprise landscape of a major car manufacturer in Europe chain using Java/J2EE stack.

    "I am based in Paris, France. In my spare time, I enjoy cycling along the River Seine, camping, reading classic literature and philately.

    "I applied to become a Redis Insider because the simplicity and the power of Redis fascinates me. We can do so much with this incredible software. I dug deep in Redis only in the last 15 months and I feel I am still scratching the surface. So many more things to do, so many applications to be made better, faster and more reliable by using the power of Redis."

    Find Moiz online at:


    Stevan Thomas, Redis Insider


    Profile picture for Suze Shardlow
    Author:
    Suze Shardlow, Developer Community Manager at Redis
    Profile picture for Stevan Thomas
    Author:
    Stevan Thomas, Redis Insider

    Stevan Thomas headshot

    "I am a Senior Software Engineer at Vela and have been using Redis for 5 years. I build web, mobile and desktop apps for a variety of industries including shipping / logistics, financial, retail and Hhalth using JavaScript, TypeScript, Node.js, React, Swift, Java, C# and Python.

    "I am based in Port of Spain, Trinidad and Tobago and, in my spare time, I enjoy hiking, fitness/CrossFit, watching movies and learning new technology.

    "I applied to become a Redis Insider because I want to be a positive influence in the adoption of Redis as a primary database for developers and business owners."


    Redis Live

    The Redis Developer Relations Team hosts a variety of live content on Twitch, YouTube and Discord. Follow us to get notified when we go live. And, you know, like and subscribe!

    Upcoming Events

Date | Time | Streamers | Show

    Past Events

Date | Streamers | Show
Thursday, July 20th | Simon Prickett | Simon's IoT Show: Counting Aircraft!
Tuesday, July 4th | Simon Prickett | Simon's IoT Show: Searching and Indexing Image Data with Redis Stack and Python - Episode 4
Thursday, June 1 | Simon Prickett | Simon's IoT Show: Searching and Indexing Image Data with Redis Stack and Python - Episode 3
Friday, May 19 | Simon Prickett | Simon's IoT Show: Searching and Indexing Image Data with Redis Stack
Thursday, April 27 | Simon Prickett | Simon's IoT Show: Storing Photos in Redis with the Raspberry Pi Camera and Python
Friday, March 31 | Savannah Norem | savannah_streams_in_snake_case A Game! Episode 7
Friday, March 24 | Savannah Norem | savannah_streams_in_snake_case A Game! Episode 6
Thursday, March 23 | Simon Prickett | Simon's IoT Show: Plane Spotting with Redis - Episode 6
Tuesday, March 14 | Simon Prickett | Simon's IoT Show: Plane Spotting with Redis - Episode 5
Friday, March 10 | Savannah Norem | savannah_streams_in_snake_case A Game! Episode 5
Friday, March 3 | Savannah Norem | savannah_streams_in_snake_case A Game! Episode 4
Thursday, March 2 | Simon Prickett | Simon's Redis IoT Show: Plane Spotting with Redis and Node.js Episode 4
Friday, February 24 | Simon Prickett | Simon's Redis IoT Show: Plane Spotting with Redis and Node.js Episode 3
Tuesday, February 21 | Savannah Norem | savannah_streams_in_snake_case A Game! Ep 3
Monday, February 20 | Simon Prickett | Simon's Redis IoT Show: Plane Spotting with Redis and Node.js Episode 2
Friday, February 17 | Savannah Norem | savannah_streams_in_snake_case A Game! Ep 2
Thursday, February 16 | Simon Prickett | Simon's Things on Thursdays: Plane Spotting with Redis and Node.js Episode 1
Tuesday, February 7 | Savannah Norem | savannah_streams_in_snake_case A Game! Ep 1
Thursday, February 2 | Simon Prickett | Simon's Things on Thursdays: More Redis Streams! (Episode 2)
Thursday, January 26 | Simon Prickett | Simon's Things on Thursdays: More Redis Streams! (Episode 1)
Tuesday, January 24 | Savannah Norem | savannah_streams_in_snake_case
Monday, January 23 | Justin Castilla | Do Birds Dream in JSON? - Episode 7
Friday, January 20 | Savannah Norem, Justin Castilla | This Week On Discord
Wednesday, January 18 | Justin Castilla | Do Birds Dream in JSON? (Episode 6 - Integrating Fast API)
Tuesday, January 17 | Savannah Norem | savannah_streams_in_snake_case
Friday, January 13 | Suze Shardlow, Savannah Norem, Simon Prickett and Justin Castilla | This Week On Discord
Thursday, January 12 | Suze Shardlow and Simon Prickett | Weekly Redis University Office Hours
Thursday, January 12 | Simon Prickett | Simon's Things on Thursdays: Introduction to Redis Stack for IoT Projects
Friday, January 6 | Suze Shardlow, Simon Prickett and Justin Castilla | This Week On Discord

2022 Events

Friday, December 16 | Suze Shardlow, Savannah Norem, Simon Prickett and Justin Castilla | This Week On Discord
Thursday, December 15 | Suze Shardlow and Simon Prickett | Redis University Office Hours
Thursday, December 15 | Simon Prickett | Simon's Things on Thursdays: Node-RED Episode 2
Monday, December 12 | Justin Castilla | Do Birds Dream in JSON? - Episode 5
Friday, December 9 | Savannah Norem, Justin Castilla | RU204: Live Day 5
Friday, December 9 | Suze Shardlow, Savannah Norem, Simon Prickett and Justin Castilla | This Week On Discord
Thursday, December 8 | Savannah Norem, Justin Castilla | RU204: Live Day 4
Thursday, December 8 | Suze Shardlow and Simon Prickett | Redis University Office Hours
Thursday, December 8 | Simon Prickett | Simon's Things on Thursdays: Node-RED Episode 1
Wednesday, December 7 | Savannah Norem, Justin Castilla | RU204: Live Day 3
Tuesday, December 6 | Savannah Norem, Justin Castilla | RU204: Live Day 2
Monday, December 5 | Justin Castilla | Do Birds Dream in JSON? - Episode 4
Monday, December 5 | Savannah Norem, Justin Castilla | RU204: Live Day 1
Friday, December 2 | Suze Shardlow, Savannah Norem, Simon Prickett and Justin Castilla | This Week On Discord
Thursday, December 1 | Suze Shardlow, Simon Prickett and Justin Castilla | Redis University Office Hours
Thursday, December 1 | Simon Prickett | Simon's Things on Thursdays: Wifi Setup with Raspberry Pi Pico W
Friday, November 25 | Suze Shardlow and Simon Prickett | This Week On Discord
Thursday, November 24 | Suze Shardlow and Simon Prickett | Redis University Office Hours
Thursday, November 24 | Simon Prickett | Simon's Things on Thursdays: Synchronized Counting with Keyspace Notifications - Episode 2
Tuesday, November 22 | Savannah Norem | savannah_streams_in_snake_case
Monday, November 21 | Justin Castilla | Do Birds Dream in JSON? - Episode 3
Friday, November 18 | Savannah Norem, Simon Prickett and Justin Castilla | This Week On Discord
Thursday, November 17 | Justin Castilla | Redis University Office Hours
Thursday, November 17 | Simon Prickett | Simon's Things on Thursdays: Synchronized Counting with Keyspace Notifications - Episode 1
Monday, November 14 | Justin Castilla | Do Birds Dream in JSON? - Episode 2
Thursday, November 10 | Savannah Norem and Justin Castilla | This Week On Discord
Thursday, November 10 | Savannah Norem and Justin Castilla | Redis University Office Hours
Friday, November 4 | Suze Shardlow, Savannah Norem, Simon Prickett and Justin Castilla | This Week On Discord
Thursday, November 3 | Suze Shardlow, Savannah Norem, Simon Prickett and Justin Castilla | Redis University Office Hours
Thursday, November 3 | Simon Prickett | Simon's Things on Thursdays - Cheerlights with MQTT and Redis Streams
Monday, October 31 | Justin Castilla | Do Birds Dream in JSON? - Episode 1
Friday, October 28 | Suze Shardlow, Simon Prickett and Justin Castilla | This Week On Discord
Thursday, October 27 | Suze Shardlow and Simon Prickett | Weekly Redis University Office Hours
Thursday, October 27 | Simon Prickett | Simon's Things on Thursdays - Raspberry Pi Pico W - Episode 5
Wednesday, October 26 | Steve Lorello | Coding with Steve: Redis OM .NET - Episode 10
Tuesday, October 25 | Savannah Norem | savannah_streams_in_snake_case
Friday, October 21 | Suze Shardlow, Simon Prickett and Justin Castilla | This Week On Discord
Thursday, October 20 | Suze Shardlow and Simon Prickett | Weekly Redis University Office Hours
Thursday, October 20 | Simon Prickett | Simon's Things on Thursdays - Raspberry Pi Pico W - Episode 4
Wednesday, October 19 | Steve Lorello | Coding with Steve: Redis OM .NET - Episode 9
Tuesday, October 18 | Savannah Norem | savannah_streams_in_snake_case
Friday, October 14 | Suze Shardlow, Savannah Norem, Simon Prickett and Justin Castilla | This Week On Discord
Thursday, October 13 | Savannah Norem and Justin Castilla | Weekly Redis University Office Hours
Wednesday, October 12 | Steve Lorello | Coding with Steve: Redis OM .NET - Episode 8
Friday, October 7 | Guy Royse | Building Redis MUD - Web, Whimsy, and Redis Stack: Episode 5
Friday, October 7 | Justin Castilla, Suze Shardlow and Simon Prickett | This Week on Discord
Thursday, October 6 | Suze Shardlow and Simon Prickett | Weekly Redis University Office Hours
Thursday, October 6 | Simon Prickett | Simon's Things on Thursdays - Raspberry Pi Pico W - Episode 3
Tuesday, October 4 | Savannah Norem | savannah_streams_in_snake_case - Stream a Little Stream - Episode 5
Friday, September 30 | Savannah Norem and Justin Castilla | This Week On Discord
Thursday, September 29 | Savannah Norem and Justin Castilla | Weekly Redis University Office Hours
Friday, September 23 | Guy Royse | Building Redis MUD - Web, Whimsy, and Redis Stack: Episode 4
Friday, September 23 | Suze Shardlow, Savannah Norem, Simon Prickett and Justin Castilla | This Week On Discord
Thursday, September 22 | Suze Shardlow, Savannah Norem, Simon Prickett and Justin Castilla | Weekly Redis University Office Hours
Thursday, September 22 | Simon Prickett | Simon's Things on Thursdays - Raspberry Pi Pico W - Episode 2
Tuesday, September 20 | Savannah Norem | savannah_streams_in_snake_case - Stream a Little Stream - Episode 4
Friday, September 16 | Savannah Norem and Justin Castilla | This Week On Discord
Thursday, September 15 | Savannah Norem and Justin Castilla | Weekly Redis University Office Hours
Friday, September 9 | Suze Shardlow, Savannah Norem, Simon Prickett and Justin Castilla | This Week On Discord
Thursday, September 8 | Suze Shardlow, Savannah Norem, Simon Prickett and Justin Castilla | Weekly Redis University Office Hours
Thursday, September 8 | Simon Prickett | Simon's Things on Thursdays - Raspberry Pi Pico W - Episode 1
Wednesday, September 7 | Steve Lorello | Coding with Steve: Redis OM .NET - Episode 7
Tuesday, September 6 | Savannah Norem | savannah_streams_in_snake_case: Stream a Little Stream - Episode 3
Friday, September 2 | Guy Royse | Building Redis MUD - Web, Whimsy, and Redis Stack: Episode 3
Friday, September 2 | Suze Shardlow, Savannah Norem and Simon Prickett | This Week On Discord
Thursday, September 1 | Simon Prickett | Simon's Things on Thursdays - Hardware Bloom Filter
    Wednesday, August 31Steve LorelloCoding with Steve: Redis OM .NET - Episode 6
    Tuesday, August 30Savannah Noremsavannah_streams_in_snake_case: Stream a Little Stream - Episode 2
    Friday, August 26Guy RoyseBuilding Redis MUD - Web, Whimsy, and Redis Stack: Episode 2
    Wednesday, August 24Steve LorelloCoding with Steve: Redis OM .NET - Episode 5
    Tuesday, August 23Savannah Noremsavannah_streams_in_snake_case: Stream a Little Stream - Episode 1
    Thursday, August 18Simon PrickettSimon's Things on Thursdays
    Tuesday, August 16Savannah Noremsavannah_streams_in_snake_case
    Friday, August 12Guy RoyseBuilding Redis MUD - Web, Whimsy, and Redis Stack: Episode 1
    Wednesday, August 10Steve LorelloCoding with Steve
    Tuesday, August 9Savannah Noremsavannah_streams_in_snake_case
    Friday, August 5Simon PrickettIoT with Redis: Introduction
    Wednesday, August 3Steve LorelloSteve Works on Redis OM .NET
    Tuesday, August 2Savannah Noremsavannah_streams_in_snake_case
    Friday, July 29Savannah Norem, Simon PrickettFirst Steps to Open Source Contribution
    Thursday, July 28Simon PrickettUp and Running with RU203: Querying, Indexing and Full-Text Search
    Wednesday, July 27Steve LorelloSteve Works on cli.redis.io
    Tuesday, July 26Savannah Noremsavannah_streams_in_snake_case - Probabilistic Data Structures
    Wednesday, July 20Steve LorelloSteve Works on cli.redis.io
    Friday, July 15Guy RoyseExploring Bun and Node Redis
    Thursday, July 14Simon PrickettIntroduction to Redis Streams with RedisInsight, Node and Python
    Wednesday, July 13Steve LorelloSteve Works on Redis OM .NET
    Friday, July 8Guy RoyseGraph, Graph, and Graph
    Thursday, July 7Simon PrickettUp and Running with the RU202 Redis Streams Course
    Wednesday, July 6Steve LorelloSteve Works on Redis OM .NET
    Thursday, June 30Savannah Norem, Guy RoyseComparing Sets, Bloom Filters, and Cuckoo Filters
    Wednesday, June 29Steve LorelloSteve Works on Redis OM .NET!
    Tuesday, June 28Simon PrickettUp and Running with the RU102J Redis University Course
    Friday, June 24Guy RoyseGraph, Graph, and Graph
    Thursday, June 23Justin Castilla, Savannah NoremRedis OM: Python + JSON + Search
    Thursday, June 16Simon Prickett, Justin CastillaCounting Things At Scale with Redis
    Wednesday, June 15Simon PrickettUp and Running with the RU102JS Redis University Course
    Wednesday, June 15Simon PrickettUp and Running with the RU102PY Redis University Course
    Friday, June 10Guy RoyseWorking on Redis OM for Node.js
    Friday, June 10Justin Castilla, Suze ShardlowWorking with Redis Data Structures
    Friday, June 3Guy RoyseTracking Aircraft with Redis + Software-Defined Radio

    RIOT

    Redis Input/Output Tools (RIOT) is a set of import/export command-line utilities for Redis:

    • RIOT Redis: live replication from any Redis database (including AWS ElastiCache) to another Redis database.
    • RIOT DB: migrate from an RDBMS to Redis, Search, JSON, ...

    Most database migration tools available today are offline in nature. Migrating data from AWS ElastiCache to Redis Enterprise Cloud, for example, means backing up your ElastiCache data to an AWS S3 bucket and then importing it into Redis Enterprise Cloud using its UI. This implies some downtime and might result in data loss. Another available technique is to create point-in-time snapshots of the source Redis server and apply the changes to the destination server to keep the two in sync. That might sound like a good approach, but it becomes challenging when you have to maintain dozens of scripts to implement the migration strategy.

    RIOT Redis is a migration tool that allows for seamless live replication between two Redis databases.

    1. Getting Started

    Download the latest release and unzip the archive.

    Launch the bin/riot-redis script and follow the usage information provided.

    2. Build and Run

    git clone https://github.com/redis-developer/riot.git
    cd riot/riot-redis
    ./riot-redis

    3. Install via Homebrew (macOS)

    brew install jruaux/tap/riot-redis

    Usage

    ❯ riot-redis
    Usage: {app} [OPTIONS] [COMMAND]
          --help      Show this help message and exit.
      -V, --version   Print version information and exit.
      -q, --quiet     Log errors only
      -d, --debug     Log in debug mode (includes normal stacktrace)
      -i, --info      Set log level to info

    You can use --help on any subcommand:

    ❯ riot-redis --help

    ❯ riot-redis import --help

    ❯ riot-redis import .. hset --help

    Redis connection options are the same as redis-cli:

    -h, --hostname=<host>      Server hostname (default: 127.0.0.1)
    -p, --port=<port>          Server port (default: 6379)
    -s, --socket=<socket>      Server socket (overrides hostname and port)
        --user=<username>      Used to send ACL style 'AUTH username pass'. Needs password.
    -a, --pass[=<password>]    Password to use when connecting to the server
    -u, --uri=<uri>            Server URI
    -o, --timeout=<sec>        Redis command timeout (default: 60)
    -n, --db=<int>             Database number (default: 0)
    -c, --cluster              Enable cluster mode
    -t, --tls                  Establish a secure TLS connection
    -l, --latency              Show latency metrics
    -m, --pool=<int>           Max pool connections (default: 8)

    Redis URI syntax is described here.
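
    For example, the target side of a replication could be given as a single URI instead of separate host and port flags; the hostname and password below are placeholders, not values from this guide:

    ❯ riot-redis -h source -p 6379 replicate -u rediss://:mypassword@target.example.com:6380 --live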

    4. Example

    Here is an example of live replication from a source Redis running at host source on port 6379 to a target Redis running at host target on port 6380:

    ❯ riot-redis -h source -p 6379 replicate --idle-timeout 500 -h target -p 6380 --live

    5. Verification

    Once replication is complete, RIOT Redis performs a verification step to compare values and TTLs between the source and target databases. The output looks like this:

    OK:1000 V:0 >:0 <:0 T:0

    • OK: # identical values
    • V: # mismatched values
    • >: # keys only present in the source database
    • <: # keys only present in the target database
    • T: # keys with a TTL difference greater than the tolerance

    6. Architecture

    RIOT Redis implements client-side replication using a producer/consumer approach:

    • The producer connects to the source Redis (e.g. ElastiCache) and iterates over keys to read their corresponding values.

    • The consumer connects to the target Redis (e.g. Redis Enterprise Cloud) and writes the key/value tuples previously read.

    1. Key reader: initiates a SCAN and optionally calls SUBSCRIBE to listen for keyspace notifications (live replication).
    2. Value reader: takes the keys and calls DUMP and TTL.
    3. Key/Value writer: takes key/value/ttl tuples and calls RESTORE and EXPIRE.
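
    To make the data flow concrete, here is a minimal, single-threaded sketch of the same SCAN/DUMP/RESTORE pipeline using the redis-py client. The hostnames source and target are placeholders, and RIOT's batching, multi-threading, and live keyspace-notification handling are all omitted:

    import redis

    # Producer side: source database; consumer side: target database.
    src = redis.Redis(host="source", port=6379)
    dst = redis.Redis(host="target", port=6380)

    for key in src.scan_iter(count=100):   # 1. key reader: SCAN the source
        dumped = src.dump(key)             # 2. value reader: DUMP the value
        if dumped is None:                 #    (key may have expired mid-scan)
            continue
        ttl_ms = src.pttl(key)             #    fetch the TTL in milliseconds
        ttl_ms = max(ttl_ms, 0)            #    -1 (no TTL) maps to 0 = no expiry
        # 3. key/value writer: RESTORE recreates the key, with its TTL, on the target
        dst.restore(key, ttl_ms, dumped, replace=True)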
    note

    Live replication makes use of keyspace notifications. Make sure the source Redis database has keyspace notifications enabled using notify-keyspace-events = KA in redis.conf or via CONFIG SET.
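
    With redis-py, for instance, this setting can be enabled at runtime; the connection details here are placeholders:

    import redis

    src = redis.Redis(host="source", port=6379)
    # "K" enables keyspace notifications; "A" selects all event classes
    src.config_set("notify-keyspace-events", "KA")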

    note

    The live replication mechanism does not guarantee data consistency. Redis sends keyspace notifications over pub/sub, which does not provide guaranteed delivery. It is therefore possible for RIOT Redis to miss some notifications, for example in the event of a network failure.
