diff --git a/.github/workflows/k8s_apis_sync.yaml b/.github/workflows/k8s_apis_sync.yaml index a4ad0bb0a..7ab36956b 100644 --- a/.github/workflows/k8s_apis_sync.yaml +++ b/.github/workflows/k8s_apis_sync.yaml @@ -80,6 +80,41 @@ jobs: sed -E -i 's/\[index\]/\[\]/g' artifacts/redis_enterprise_remote_cluster_api.md awk '/(#[^")]+)index/ {gsub(/index/,"")}; {print}' artifacts/redis_enterprise_remote_cluster_api.md > _tmp.md && mv _tmp.md artifacts/redis_enterprise_remote_cluster_api.md + - name: 'Generate YAML snippets' + run: |- + function formatYamlSnippet() { + cat > "$2" << EOL + \`\`\`yaml + $(cat $1) + \`\`\` + EOL + } + + formatYamlSnippet admission-service.yaml content/embeds/admission-service.md + formatYamlSnippet admission/webhook.yaml content/embeds/admission_webhook.md + formatYamlSnippet examples/v1/rec.yaml content/embeds/rec.md + formatYamlSnippet examples/v1alpha1/reaadb.yaml content/embeds/reaadb.md + formatYamlSnippet examples/v1alpha1/redb.yaml content/embeds/redb.md + formatYamlSnippet examples/v1alpha1/rerc.yaml content/embeds/rerc.md + formatYamlSnippet log_collector/log_collector_role_all_mode.yaml content/embeds/log_collector_role_all_mode.md + formatYamlSnippet log_collector/log_collector_role_restricted_mode.yaml content/embeds/log_collector_role_restricted_mode.md + formatYamlSnippet multi-namespace-redb/operator_cluster_role_binding.yaml content/embeds/multi-ns_operator_cluster_role_binding.md + formatYamlSnippet multi-namespace-redb/operator_cluster_role.yaml content/embeds/multi-ns_operator_cluster_role.md + formatYamlSnippet multi-namespace-redb/role_binding.yaml content/embeds/multi-ns_role_binding.md + formatYamlSnippet multi-namespace-redb/role.yaml content/embeds/multi-ns_role.md + formatYamlSnippet openshift/admission-service.yaml content/embeds/openshift_admission-service.md + formatYamlSnippet openshift/rec_rhel.yaml content/embeds/openshift_rec.md + formatYamlSnippet openshift/role_binding.yaml 
content/embeds/openshift_role_binding.md + formatYamlSnippet openshift/role.yaml content/embeds/openshift_role.md + formatYamlSnippet openshift/scc.yaml content/embeds/openshift_scc.md + formatYamlSnippet openshift/service_account.yaml content/embeds/openshift_service_account.md + formatYamlSnippet rack_awareness/rack_aware_cluster_role_binding.yaml content/embeds/rack_aware_cluster_role_binding.md + formatYamlSnippet rack_awareness/rack_aware_cluster_role.yaml content/embeds/rack_aware_cluster_role.md + formatYamlSnippet rack_awareness/rack_aware_rec.yaml content/embeds/rack_aware_rec.md + formatYamlSnippet role_binding.yaml content/embeds/role_binding.md + formatYamlSnippet role.yaml content/embeds/role.md + formatYamlSnippet service_account.yaml content/embeds/service_account.md + - name: 'Send pull request' env: GH_TOKEN: ${{ steps.generate-token.outputs.token }} @@ -101,6 +136,8 @@ jobs: git apply content/operate/kubernetes/reference/kubernetes-api-reference-frontmatter.patch git add content/operate/kubernetes/reference/ + git add content/embeds/ + git commit -m "k8s api docs ${RELEASE}" git push origin "${BRANCH}" diff --git a/content/commands/auth/index.md b/content/commands/auth/index.md index 7e18f8d17..0d9f9ee63 100644 --- a/content/commands/auth/index.md +++ b/content/commands/auth/index.md @@ -51,7 +51,9 @@ The AUTH command authenticates the current connection in two cases: Redis versions prior of Redis 6 were only able to understand the one argument version of the command: - AUTH +{{< clients-example cmds_cnxmgmt auth1 >}} +AUTH "temp-pass" +{{< /clients-example >}} This form just authenticates against the password set with `requirepass`. In this configuration Redis will deny any command executed by the just @@ -62,7 +64,9 @@ Otherwise, an error is returned and the clients needs to try a new password. 
When Redis ACLs are used, the command should be given in an extended way: - AUTH +{{< clients-example cmds_cnxmgmt auth2 >}} +AUTH "test-user" "strong_password" +{{< /clients-example >}} In order to authenticate the current connection with one of the connections defined in the ACL list (see [`ACL SETUSER`]({{< relref "/commands/acl-setuser" >}})) and the official [ACL guide]({{< relref "/operate/oss_and_stack/management/security/acl" >}}) for more information. diff --git a/content/commands/flushall/index.md b/content/commands/flushall/index.md index 1edccfcd4..36ae58f68 100644 --- a/content/commands/flushall/index.md +++ b/content/commands/flushall/index.md @@ -62,6 +62,10 @@ It is possible to use one of the following modifiers to dictate the flushing mod * `ASYNC`: flushes the databases asynchronously * `SYNC`: flushes the databases synchronously +{{< clients-example cmds_servermgmt flushall >}} +FLUSHALL SYNC +{{< /clients-example >}} + ## Notes * An asynchronous `FLUSHALL` command only deletes keys that were present at the time the command was invoked. Keys created during an asynchronous flush will be unaffected. diff --git a/content/commands/hgetall/index.md b/content/commands/hgetall/index.md index 380a76973..89e6d7709 100644 --- a/content/commands/hgetall/index.md +++ b/content/commands/hgetall/index.md @@ -53,6 +53,20 @@ of the reply is twice the size of the hash. 
## Examples +{{< clients-example cmds_hash hgetall >}} +redis> HSET myhash field1 "Hello" +(integer) 1 +redis> HSET myhash field2 "World" +(integer) 1 +redis> HGETALL myhash +1) "field1" +2) "Hello" +3) "field2" +4) "World" +{{< /clients-example >}} + +Give these commands a try in the interactive console: + {{% redis-cli %}} HSET myhash field1 "Hello" HSET myhash field2 "World" diff --git a/content/commands/hvals/index.md b/content/commands/hvals/index.md index 3c958999b..99e10d1a1 100644 --- a/content/commands/hvals/index.md +++ b/content/commands/hvals/index.md @@ -51,6 +51,18 @@ Returns all values in the hash stored at `key`. ## Examples +{{< clients-example cmds_hash hvals >}} +redis> HSET myhash field1 "Hello" +(integer) 1 +redis> HSET myhash field2 "World" +(integer) 1 +redis> HVALS myhash +1) "Hello" +2) "World" +{{< /clients-example >}} + +Give these commands a try in the interactive console: + {{% redis-cli %}} HSET myhash field1 "Hello" HSET myhash field2 "World" diff --git a/content/commands/incrbyfloat/index.md b/content/commands/incrbyfloat/index.md index d07841a42..7a0ddc7f5 100644 --- a/content/commands/incrbyfloat/index.md +++ b/content/commands/incrbyfloat/index.md @@ -81,11 +81,11 @@ regardless of the actual internal precision of the computation. ## Examples {{% redis-cli %}} -SET mykey "10.50" -INCRBYFLOAT mykey "0.1" -INCRBYFLOAT mykey "-5" -SET mykey "5.0e3" -INCRBYFLOAT mykey "2.0e2" +SET mykey 10.50 +INCRBYFLOAT mykey 0.1 +INCRBYFLOAT mykey -5 +SET mykey 5.0e3 +INCRBYFLOAT mykey 2.0e2 {{% /redis-cli %}} diff --git a/content/commands/info/index.md b/content/commands/info/index.md index 7f0d494ce..3bc68741e 100644 --- a/content/commands/info/index.md +++ b/content/commands/info/index.md @@ -68,6 +68,12 @@ It can also take the following values: When no parameter is provided, the `default` option is assumed. 
+{{< clients-example cmds_servermgmt info >}} +INFO +{{< /clients-example >}} + +Give these commands a try in the interactive console: + {{% redis-cli %}} INFO {{% /redis-cli %}} diff --git a/content/commands/llen/index.md b/content/commands/llen/index.md index 7da4b3e67..1573e29bc 100644 --- a/content/commands/llen/index.md +++ b/content/commands/llen/index.md @@ -51,6 +51,17 @@ An error is returned when the value stored at `key` is not a list. ## Examples +{{< clients-example cmds_list llen >}} +redis> LPUSH mylist "World" +(integer) 1 +redis> LPUSH mylist "Hello" +(integer) 2 +redis> LLEN mylist +(integer) 2 +{{< /clients-example >}} + +Give these commands a try in the interactive console: + {{% redis-cli %}} LPUSH mylist "World" LPUSH mylist "Hello" diff --git a/content/commands/lpop/index.md b/content/commands/lpop/index.md index 09ba05f51..61c8c6780 100644 --- a/content/commands/lpop/index.md +++ b/content/commands/lpop/index.md @@ -65,6 +65,21 @@ to `count` elements, depending on the list's length. ## Examples +{{< clients-example cmds_list lpop >}} +redis> RPUSH mylist "one" "two" "three" "four" "five" +(integer) 5 +redis> LPOP mylist +"one" +redis> LPOP mylist 2 +1) "two" +2) "three" +redis> LRANGE mylist 0 -1 +1) "four" +2) "five" +{{< /clients-example>}} + +Give these commands a try in the interactive console: + {{% redis-cli %}} RPUSH mylist "one" "two" "three" "four" "five" LPOP mylist diff --git a/content/commands/lpush/index.md b/content/commands/lpush/index.md index 5ee7bb8e3..339d5dfab 100644 --- a/content/commands/lpush/index.md +++ b/content/commands/lpush/index.md @@ -69,10 +69,20 @@ So for instance the command `LPUSH mylist a b c` will result into a list containing `c` as first element, `b` as second element and `a` as third element. 
## Examples +{{< clients-example cmds_list lpush >}} +redis> LPUSH mylist "world" +(integer) 1 +redis> LPUSH mylist "hello" +(integer) 2 +redis> LRANGE mylist 0 -1 +1) "hello" +2) "world" +{{< /clients-example >}} + +Give these commands a try in the interactive console: {{% redis-cli %}} LPUSH mylist "world" LPUSH mylist "hello" LRANGE mylist 0 -1 {{% /redis-cli %}} - diff --git a/content/commands/lrange/index.md b/content/commands/lrange/index.md index 92a6cc832..14c361e7f 100644 --- a/content/commands/lrange/index.md +++ b/content/commands/lrange/index.md @@ -80,6 +80,29 @@ the last element of the list. ## Examples +{{< clients-example cmds_list lrange >}} +redis> RPUSH mylist "one" +(integer) 1 +redis> RPUSH mylist "two" +(integer) 2 +redis> RPUSH mylist "three" +(integer) 3 +redis> LRANGE mylist 0 0 +1) "one" +redis> LRANGE mylist -3 2 +1) "one" +2) "two" +3) "three" +redis> LRANGE mylist -100 100 +1) "one" +2) "two" +3) "three" +redis> LRANGE mylist 5 10 +(empty array) +{{< /clients-example >}} + +Give these commands a try in the interactive console: + {{% redis-cli %}} RPUSH mylist "one" RPUSH mylist "two" @@ -89,4 +112,3 @@ LRANGE mylist -3 2 LRANGE mylist -100 100 LRANGE mylist 5 10 {{% /redis-cli %}} - diff --git a/content/commands/rpop/index.md b/content/commands/rpop/index.md index 1085d2d32..a7da667d4 100644 --- a/content/commands/rpop/index.md +++ b/content/commands/rpop/index.md @@ -65,10 +65,24 @@ to `count` elements, depending on the list's length. 
## Examples +{{< clients-example cmds_list rpop >}} +redis> RPUSH mylist "one" "two" "three" "four" "five" +(integer) 5 +redis> RPOP mylist +"five" +redis> RPOP mylist 2 +1) "four" +2) "three" +redis> LRANGE mylist 0 -1 +1) "one" +2) "two" +{{< /clients-example >}} + +Give these commands a try in the interactive console: + {{% redis-cli %}} RPUSH mylist "one" "two" "three" "four" "five" RPOP mylist RPOP mylist 2 LRANGE mylist 0 -1 {{% /redis-cli %}} - diff --git a/content/commands/rpush/index.md b/content/commands/rpush/index.md index 8d5d9cdb1..c4367831d 100644 --- a/content/commands/rpush/index.md +++ b/content/commands/rpush/index.md @@ -70,6 +70,18 @@ containing `a` as first element, `b` as second element and `c` as third element. ## Examples +{{< clients-example cmds_list rpush >}} +redis> RPUSH mylist "hello" +(integer) 1 +redis> RPUSH mylist "world" +(integer) 2 +redis> LRANGE mylist 0 -1 +1) "hello" +2) "world" +{{< /clients-example >}} + +Give these commands a try in the interactive console: + {{% redis-cli %}} RPUSH mylist "hello" RPUSH mylist "world" diff --git a/content/commands/sadd/index.md b/content/commands/sadd/index.md index 95f7b932c..ae67f6e7b 100644 --- a/content/commands/sadd/index.md +++ b/content/commands/sadd/index.md @@ -64,10 +64,23 @@ An error is returned when the value stored at `key` is not a set. 
## Examples +{{< clients-example cmds_set sadd >}} +redis> SADD myset "Hello" +(integer) 1 +redis> SADD myset "World" +(integer) 1 +redis> SADD myset "World" +(integer) 0 +redis> SMEMBERS myset +1) "Hello" +2) "World" +{{< /clients-example >}} + +Give these commands a try in the interactive console: + {{% redis-cli %}} SADD myset "Hello" SADD myset "World" SADD myset "World" SMEMBERS myset {{% /redis-cli %}} - diff --git a/content/commands/smembers/index.md b/content/commands/smembers/index.md index a4d74fd3e..8ba8ab695 100644 --- a/content/commands/smembers/index.md +++ b/content/commands/smembers/index.md @@ -53,9 +53,20 @@ This has the same effect as running [`SINTER`]({{< relref "/commands/sinter" >}} ## Examples +{{< clients-example cmds_set smembers >}} +redis> SADD myset "Hello" +(integer) 1 +redis> SADD myset "World" +(integer) 1 +redis> SMEMBERS myset +1) "Hello" +2) "World" +{{< /clients-example >}} + +Give these commands a try in the interactive console: + {{% redis-cli %}} SADD myset "Hello" SADD myset "World" SMEMBERS myset {{% /redis-cli %}} - diff --git a/content/develop/clients/dotnet/queryjson.md b/content/develop/clients/dotnet/queryjson.md index 922291923..422853a9d 100644 --- a/content/develop/clients/dotnet/queryjson.md +++ b/content/develop/clients/dotnet/queryjson.md @@ -10,7 +10,7 @@ categories: - kubernetes - clients description: Learn how to use the Redis query engine with JSON -linkTitle: JSON query example +linkTitle: Index and query JSON title: Example - Index and query JSON documents weight: 2 --- diff --git a/content/develop/clients/go/queryjson.md b/content/develop/clients/go/queryjson.md index eb0f17bc3..63777741f 100644 --- a/content/develop/clients/go/queryjson.md +++ b/content/develop/clients/go/queryjson.md @@ -10,7 +10,7 @@ categories: - kubernetes - clients description: Learn how to use the Redis query engine with JSON -linkTitle: JSON query example +linkTitle: Index and query JSON title: Example - Index and query JSON 
documents weight: 2 --- diff --git a/content/develop/clients/jedis/queryjson.md b/content/develop/clients/jedis/queryjson.md index 437206632..04e731958 100644 --- a/content/develop/clients/jedis/queryjson.md +++ b/content/develop/clients/jedis/queryjson.md @@ -10,7 +10,7 @@ categories: - kubernetes - clients description: Learn how to use the Redis query engine with JSON -linkTitle: JSON query example +linkTitle: Index and query JSON title: Example - Index and query JSON documents weight: 2 --- diff --git a/content/develop/clients/php/queryjson.md b/content/develop/clients/php/queryjson.md index 67ed7a6c7..13c2a4b21 100644 --- a/content/develop/clients/php/queryjson.md +++ b/content/develop/clients/php/queryjson.md @@ -10,7 +10,7 @@ categories: - kubernetes - clients description: Learn how to use the Redis query engine with JSON -linkTitle: JSON query example +linkTitle: Index and query JSON title: Example - Index and query JSON documents weight: 2 --- diff --git a/content/develop/clients/redis-py/queryjson.md b/content/develop/clients/redis-py/queryjson.md index cef016d40..189dd5e3a 100644 --- a/content/develop/clients/redis-py/queryjson.md +++ b/content/develop/clients/redis-py/queryjson.md @@ -10,7 +10,7 @@ categories: - kubernetes - clients description: Learn how to use the Redis query engine with JSON -linkTitle: JSON query example +linkTitle: Index and query JSON title: Example - Index and query JSON documents weight: 2 --- @@ -24,98 +24,51 @@ Make sure that you have Redis Stack and `redis-py` installed. 
Import dependencies: -```python -import redis -from redis.commands.json.path import Path -import redis.commands.search.aggregation as aggregations -import redis.commands.search.reducers as reducers -from redis.commands.search.field import TextField, NumericField, TagField -from redis.commands.search.indexDefinition import IndexDefinition, IndexType -from redis.commands.search.query import NumericFilter, Query -``` +{{< clients-example py_home_json import >}} +{{< /clients-example >}} Connect to your Redis database. -```python -r = redis.Redis(host='localhost', port=6379) -``` - -Let's create some test data to add to your database. - -```python -user1 = { - "name": "Paul John", - "email": "paul.john@example.com", - "age": 42, - "city": "London" -} -user2 = { - "name": "Eden Zamir", - "email": "eden.zamir@example.com", - "age": 29, - "city": "Tel Aviv" -} -user3 = { - "name": "Paul Zamir", - "email": "paul.zamir@example.com", - "age": 35, - "city": "Tel Aviv" -} -``` - -Define indexed fields and their data types using `schema`. Use JSON path expressions to map specific JSON elements to the schema fields. - -```python -schema = ( - TextField("$.name", as_name="name"), - TagField("$.city", as_name="city"), - NumericField("$.age", as_name="age") -) -``` - -Create an index. In this example, all JSON documents with the key prefix `user:` will be indexed. For more information, see [Query syntax]({{< relref "/develop/interact/search-and-query/query/" >}}). - -```python -rs = r.ft("idx:users") -rs.create_index( - schema, - definition=IndexDefinition( - prefix=["user:"], index_type=IndexType.JSON - ) -) -# b'OK' -``` - -Use [`JSON.SET`]({{< baseurl >}}/commands/json.set/) to set each user value at the specified path. - -```python -r.json().set("user:1", Path.root_path(), user1) -r.json().set("user:2", Path.root_path(), user2) -r.json().set("user:3", Path.root_path(), user3) -``` - -Let's find user `Paul` and filter the results by age. 
- -```python -res = rs.search( - Query("Paul @age:[30 40]") -) -# Result{1 total, docs: [Document {'id': 'user:3', 'payload': None, 'json': '{"name":"Paul Zamir","email":"paul.zamir@example.com","age":35,"city":"Tel Aviv"}'}]} -``` - -Query using JSON Path expressions. - -```python -rs.search( - Query("Paul").return_field("$.city", as_field="city") -).docs -# [Document {'id': 'user:1', 'payload': None, 'city': 'London'}, Document {'id': 'user:3', 'payload': None, 'city': 'Tel Aviv'}] -``` - -Aggregate your results using [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate/). - -```python -req = aggregations.AggregateRequest("*").group_by('@city', reducers.count().alias('count')) -print(rs.aggregate(req).rows) -# [[b'city', b'Tel Aviv', b'count', b'2'], [b'city', b'London', b'count', b'1']] -``` \ No newline at end of file +{{< clients-example py_home_json connect >}} +{{< /clients-example >}} + +Create some test data to add to your database. + +{{< clients-example py_home_json create_data >}} +{{< /clients-example >}} + +Create an index. In this example, only JSON documents with the key prefix `user:` are indexed. For more information, see [Query syntax]({{< relref "/develop/interact/search-and-query/query/" >}}). + +{{< clients-example py_home_json make_index >}} +{{< /clients-example >}} + +Add the three sets of user data to the database as +[JSON]({{< relref "/develop/data-types/json" >}}) objects. +If you use keys with the `user:` prefix then Redis will index the +objects automatically as you add them: + +{{< clients-example py_home_json add_data >}} +{{< /clients-example >}} + +You can now use the index to search the JSON objects. 
The +[query]({{< relref "/develop/interact/search-and-query/query" >}}) +below searches for objects that have the text "Paul" in any field +and have an `age` value in the range 30 to 40: + +{{< clients-example py_home_json query1 >}} +{{< /clients-example >}} + +Specify query options to return only the `city` field: + +{{< clients-example py_home_json query2 >}} +{{< /clients-example >}} + +Use an +[aggregation query]({{< relref "/develop/interact/search-and-query/query/aggregation" >}}) +to count all users in each city. + +{{< clients-example py_home_json query3 >}} +{{< /clients-example >}} + +See the [Redis query engine]({{< relref "/develop/interact/search-and-query" >}}) docs +for a full description of all query features with examples. diff --git a/content/develop/data-types/sorted-sets.md b/content/develop/data-types/sorted-sets.md index ec81ef80a..c85857c28 100644 --- a/content/develop/data-types/sorted-sets.md +++ b/content/develop/data-types/sorted-sets.md @@ -55,7 +55,7 @@ Let's start with a simple example, we'll add all our racers and the score they g As you can see [`ZADD`]({{< relref "/commands/zadd" >}}) is similar to [`SADD`]({{< relref "/commands/sadd" >}}), but takes one additional argument (placed before the element to be added) which is the score. [`ZADD`]({{< relref "/commands/zadd" >}}) is also variadic, so you are free to specify multiple score-value -pairs, even if this is not used in the example above. +pairs, as shown in the example above. With sorted sets it is trivial to return a list of racers sorted by their birth year because actually *they are already sorted*. 
diff --git a/content/develop/interact/search-and-query/advanced-concepts/vectors.md b/content/develop/interact/search-and-query/advanced-concepts/vectors.md index 8074a833c..47c61c5c2 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/vectors.md +++ b/content/develop/interact/search-and-query/advanced-concepts/vectors.md @@ -483,6 +483,33 @@ FT.SEARCH products "(@type:{shirt} @year:[2020 2022]) | @description_vector:[VEC To explore additional Python vector search examples, review recipes for the [`Redis Python`](https://github.com/redis-developer/redis-ai-resources/blob/main/python-recipes/vector-search/00_redispy.ipynb) client library and the [`Redis Vector Library`](https://github.com/redis-developer/redis-ai-resources/blob/main/python-recipes/vector-search/01_redisvl.ipynb). +## Memory consumption comparison + +Following is a Python+NumPy example of vector sizes for the supported vector types; `BFLOAT16`, `FLOAT16`, `FLOAT32`, and `FLOAT64`. + +```python +import numpy as np + +#install ml_dtypes from pip install ml-dtypes +from ml_dtypes import bfloat16 + +# random float64 100 dimensions +double_precision_vec = np.random.rand(100) + +# for float64 and float32 +print(f'length of float64 vector: {len(double_precision_vec.tobytes())}') # >>> 800 +print(f'length of float32 vector: {len(double_precision_vec.astype(np.float32).tobytes())}') # >>> 400 + +# for float16 +np_data_type = np.float16 +half_precision_vec_float16 = double_precision_vec.astype(np_data_type) +print(f'length of float16 vector: {len(half_precision_vec_float16.tobytes())}') # >>> 200 + +# for bfloat16 +bfloat_dtype = bfloat16 +half_precision_vec_bfloat16 = double_precision_vec.astype(bfloat_dtype) +print(f'length of bfloat16 vector: {len(half_precision_vec_bfloat16.tobytes())}') # >>> 200 +``` ## Next steps diff --git a/content/develop/interact/search-and-query/best-practices/dev-to-prod-best-practices.md 
b/content/develop/interact/search-and-query/best-practices/dev-to-prod-best-practices.md new file mode 100644 index 000000000..1916ba5b0 --- /dev/null +++ b/content/develop/interact/search-and-query/best-practices/dev-to-prod-best-practices.md @@ -0,0 +1,105 @@ +--- +Title: Move from Development to Production with Redis Query Engine +alwaysopen: false +categories: +- docs +- develop +- stack +- oss +- kubernetes +- clients +linkTitle: RQE DEV to PROD +weight: 2 +--- + +Transitioning a Redis Community Edition with Redis Query Engine (RQE) environment from development to production requires thoughtful consideration of configuration, performance tuning, and resource allocation. This guide outlines key practices to ensure your Redis deployment operates optimally under production workloads. + +## Configuration parameter considerations + +RQE offers several configurable parameters that influence query results and performance. While a full list of these parameters and their functions can be found [here]({{< relref "/develop/interact/search-and-query/advanced-concepts/dialects" >}}), this section highlights the most commonly adjusted parameters for production environments. + +### 1. `TIMEOUT` + +- Purpose: limits the duration a query is allowed to execute. +- Default: 500 milliseconds. +- Behavior: + - Ensures that queries do not monopolize the main Redis thread. + - If a query exceeds the `TIMEOUT` value, its outcome is determined by the `ON_TIMEOUT` setting: + - `FAIL`: the query will return an error. + - `PARTIAL`: this setting will return the top results accumulated by the query until it timed out. +- Recommendations: + - Caution: be mindful when increasing `TIMEOUT`, as long-running queries can degrade overall system performance. + + +### 2. `MINPREFIX` + +- Purpose: sets the minimum number of characters required for wildcard searches. +- Default: 2 characters. +- Behavior: + - Queries like `he*` are valid, while `h*` would not meet the threshold. 
+- Recommendations: + - Lowering this value to 1 can significantly increase result sets, which may lead to degraded performance. + - Keep the default unless there is a strong use case for single-character wildcards. + +### 3. `MAXPREFIXEXPANSIONS` + +- Purpose: Defines the maximum number of expansions for a wildcard query term. +- Default: 200 expansions. +- Behavior: + - Expansions: when a wildcard query term is processed, Redis generates a list of all possible matches from the index that satisfy the wildcard. For example, the query he* might expand to terms like hello, hero, and heat. Each of these matches is an "expansion." + - This parameter limits how many of these expansions Redis will generate and process. If the number of possible matches exceeds the limit, the query may return incomplete results or fail, depending on the query context. +- Recommendations: + - Avoid increasing this parameter excessively, as it can lead to performance bottlenecks during query execution. + - If wildcard searches are common, consider optimizing your index to reduce the reliance on large wildcard expansions. + +### 4. `DEFAULT_DIALECT` + +- Purpose: specifies the default query dialect used by [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) and [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate) commands. +- Default: [Dialect 1]({{< relref "/develop/interact/search-and-query/advanced-concepts/dialects" >}}). +- Recommendations: + - Update the default to [**Dialect 4**]({{< relref "/develop/interact/search-and-query/advanced-concepts/dialects#dialect-4" >}}) for better performance and access to advanced features. + - Individual commands can override this parameter if necessary, but setting a higher default ensures consistent performance across queries. + +## Testing + +### 1. Correctness +- Run a few test queries and check the results are what you expect. 
+- Use the following tools to validate and debug: + - Redis CLI: use the [`MONITOR`]({{< baseurl >}}/commands/monitor) command or [profiling features]({{< relref "/develop/tools/insight#profiler" >}}) in Redis Insight to analyze commands. + - [`FT.PROFILE`]({{< baseurl >}}/commands/ft.profile): Provides detailed insights into individual query execution paths, helping identify bottlenecks and inefficiencies. + +### 2. Performance +- Test query performance in a controlled test environment that mirrors production as closely as possible. +- Use tools like `memtier_benchmark` or custom test applications to simulate load. +- Network Considerations: + - Minimize latency during testing by locating test clients in the same network as the Redis instance. + - For Redis Cloud, ensure test machines are in a **VPC-peered environment** with the target Redis database. + +## Sizing requirements + +Redis Search has resource requirements distinct from general caching use cases. Proper sizing ensures that the system can handle production workloads efficiently. + +### Key considerations: +1. CPU: + - Adequate CPU resources are critical. + - Ensure CPUs are not over-subscribed with search threads and shard processes. +2. RAM: + - Plan for sufficient memory to store the dataset and indexes, plus overhead for operations. +3. Network: + - High throughput and low latency are essential, particularly for applications with demanding query patterns. + +### Tools: +- Use the [Redis Search Sizing Calculator](https://redis.io/redisearch-sizing-calculator/) to estimate resource requirements based on your dataset and workload. + +## Demand spikes + +Production environments must be sized for peak load scenarios to ensure performance remains acceptable under maximum stress. + +### Recommendations: +1. Plan for Spikes: + - If query workloads are expected to vary significantly, ensure the infrastructure can handle peak loads. + - Monitor real-world usage patterns and adjust capacity as needed. +2. 
Autoscaling: + - Consider using autoscaling strategies in cloud environments to dynamically adjust resources based on load. + +By following these best practices, you can ensure a smooth and efficient transition from development to production with Redis Community Edition and RQE. Proper configuration, rigorous testing, and careful resource planning are critical to delivering a reliable and high-performance Redis deployment. diff --git a/content/develop/interact/search-and-query/indexing/geoindex.md b/content/develop/interact/search-and-query/indexing/geoindex.md index 3df944e47..ce31baa40 100644 --- a/content/develop/interact/search-and-query/indexing/geoindex.md +++ b/content/develop/interact/search-and-query/indexing/geoindex.md @@ -35,32 +35,32 @@ reference page for a full description of both types. The following command creates a `GEO` index for JSON objects that contain the geospatial data in a field called `location`: -```bash +{{< clients-example geoindex create_geo_idx >}} > FT.CREATE productidx ON JSON PREFIX 1 product: SCHEMA $.location AS location GEO OK -``` +{{< /clients-example >}} If you now add JSON objects with the `product:` prefix and a `location` field, they will be added to the index automatically: -```bash +{{< clients-example geoindex add_geo_json >}} > JSON.SET product:46885 $ '{"description": "Navy Blue Slippers","price": 45.99,"city": "Denver","location": "-104.991531, 39.742043"}' OK > JSON.SET product:46886 $ '{"description": "Bright Green Socks","price": 25.50,"city": "Fort Collins","location": "-105.0618814,40.5150098"}' OK -``` +{{< /clients-example >}} The query below finds products within a 100 mile radius of Colorado Springs (Longitude=-104.800644, Latitude=38.846127). 
This returns only the location in Denver, but a radius of 200 miles would also include the location in Fort Collins: -```bash +{{< clients-example geoindex geo_query >}} > FT.SEARCH productidx '@location:[-104.800644 38.846127 100 mi]' 1) "1" 2) "product:46885" 3) 1) "$" 2) "{\"description\":\"Navy Blue Slippers\",\"price\":45.99,\"city\":\"Denver\",\"location\":\"-104.991531, 39.742043\"}" -``` +{{< /clients-example >}} See [Geospatial queries]({{< relref "/develop/interact/search-and-query/query/geo-spatial" >}}) for more information about the available options. @@ -73,14 +73,14 @@ of the field definition specifies Cartesian coordinates instead of the default spherical geographical coordinates. Use `SPHERICAL` in place of `FLAT` to choose the coordinate space explicitly. -```bash +{{< clients-example geoindex create_gshape_idx >}} > FT.CREATE geomidx ON JSON PREFIX 1 shape: SCHEMA $.name AS name TEXT $.geom AS geom GEOSHAPE FLAT OK -``` +{{< /clients-example >}} Use the `shape:` prefix for the JSON objects to add them to the index: -```bash +{{< clients-example geoindex add_gshape_json >}} > JSON.SET shape:1 $ '{"name": "Green Square", "geom": "POLYGON ((1 1, 1 3, 3 3, 3 1, 1 1))"}' OK > JSON.SET shape:2 $ '{"name": "Red Rectangle", "geom": "POLYGON ((2 2.5, 2 3.5, 3.5 3.5, 3.5 2.5, 2 2.5))"}' @@ -89,20 +89,20 @@ OK OK > JSON.SET shape:4 $ '{"name": "Purple Point", "geom": "POINT (2 2)"}' OK -``` +{{< /clients-example >}} You can now run various geospatial queries against the index. 
For example, the query below returns any shapes within the boundary of the green square but omits the green square itself: -```bash +{{< clients-example geoindex gshape_query >}} > FT.SEARCH geomidx "(-@name:(Green Square) @geom:[WITHIN $qshape])" PARAMS 2 qshape "POLYGON ((1 1, 1 3, 3 3, 3 1, 1 1))" RETURN 1 name DIALECT 4 1) (integer) 1 2) "shape:4" 3) 1) "name" 2) "[\"Purple Point\"]" -``` +{{< /clients-example >}} You can also run queries to find whether shapes in the index completely contain or overlap each other. See diff --git a/content/embeds/hardware-requirements-embed.md b/content/embeds/hardware-requirements-embed.md index 6bd7d54c2..321d8f172 100644 --- a/content/embeds/hardware-requirements-embed.md +++ b/content/embeds/hardware-requirements-embed.md @@ -24,11 +24,13 @@ We recommend these hardware requirements for production systems or for developme | Item | Description | Minimum requirements | Recommended | |------------|-----------------|------------|-----------------| | Nodes[1](#table-note-1) per cluster | At least three nodes are required to support a reliable, highly available deployment that handles process failure, node failure, and network split events in a consistent manner. | 3 nodes | >= 3 nodes (Must be an odd number of nodes) | -| Cores[2](#table-note-2) per node | Redis Enterprise Software is based on a multi-tenant architecture and can run multiple Redis processes (or shards) on the same core without significant performance degradation. | 4 cores | >=8 cores | -| RAM[3](#table-note-3) per node | Defining your RAM size must be part of the capacity planning for your Redis usage. | 15GB | >=30GB | -| Ephemeral Storage | Used for storing [replication files (RDB format) and cluster log files]({{< relref "/operate/rs/installing-upgrading/install/plan-deployment/persistent-ephemeral-storage" >}}). 
| RAM x 2 | >= RAM x 4 | -| Persistent Storage | Used for storing [snapshot (RDB format) and AOF files]({{< relref "/operate/rs/installing-upgrading/install/plan-deployment/persistent-ephemeral-storage" >}}) over a persistent storage media, such as AWS Elastic Block Storage (EBS) or Azure Data Disk. | RAM x 3 | In-memory >= RAM x 6 (except for [extreme 'write' scenarios]({{< relref "/operate/rs/clusters/optimize/disk-sizing-heavy-write-scenarios" >}}))

[Auto Tiering]({{< relref "/operate/rs/databases/auto-tiering/" >}}) >= (RAM + Flash) x 5. | -| Network[4](#table-note-4) | We recommend using multiple NICs per node where each NIC is >100Mbps, but Redis Enterprise Software can also run over a single 1Gbps interface network used for processing application requests, inter-cluster communication, and storage access. | 1G | >=10G | +| Cores[2](#table-note-2) per node | Redis Enterprise Software is based on a multi-tenant architecture and can run multiple Redis processes (or shards) on the same core without significant performance degradation. | 2 cores | >=8 cores | +| RAM[3](#table-note-3) per node | Defining your RAM size must be part of the capacity planning for your Redis usage. | 8GB | >=32GB | +| Ephemeral storage | Used for storing [replication files (RDB format) and cluster log files]({{< relref "/operate/rs/installing-upgrading/install/plan-deployment/persistent-ephemeral-storage" >}}). | RAM x 2 | >= RAM x 4 | +| Persistent storage[4](#table-note-4) | Used for storing [snapshot (RDB format) and AOF files]({{< relref "/operate/rs/installing-upgrading/install/plan-deployment/persistent-ephemeral-storage" >}}) over a persistent storage media, such as AWS Elastic Block Storage (EBS) or Azure Data Disk. | RAM x 3 | In-memory >= RAM x 4 (except for [extreme 'write' scenarios]({{< relref "/operate/rs/clusters/optimize/disk-sizing-heavy-write-scenarios" >}}))

[Auto Tiering]({{< relref "/operate/rs/databases/auto-tiering/" >}}) >= (RAM + Flash) x 4. | +| Network[5](#table-note-5) | We recommend using multiple NICs per node where each NIC is >1Gbps, but Redis Enterprise Software can also run over a single 1Gbps interface network used for processing application requests, inter-cluster communication, and storage access. | 1G | >=10G | +| Local disk for [Auto Tiering]({{< relref "/operate/rs/databases/auto-tiering/" >}}) | Used to extend database DRAM capacity with solid state drives (SSDs). Flash memory must be locally attached. [Read more]({{< relref "/operate/rs/databases/auto-tiering/" >}}) | (RAM+Flash) x 1.6 | (RAM+Flash) x 2.5 | + Additional considerations: @@ -48,7 +50,7 @@ Additional considerations: - If some of the cluster nodes are utilizing more than 80% of the CPU, consider migrating busy resources to less busy nodes. - - If all the cluster nodes are utilizing over 80% of the CPU, consider scaling out the cluster by [adding a node]({{< relref "/operate/rs/clusters/add-node" >}}). + - If all the cluster nodes are utilizing over 80% of the CPU, strongly consider scaling out the cluster by [adding a node]({{< relref "/operate/rs/clusters/add-node" >}}). 3. RAM: @@ -56,10 +58,15 @@ Additional considerations: - If one or more cluster nodes utilizes more than 65% of the RAM, consider migrating resources to less active nodes. - - If all cluster nodes are utilizing more than 70% of available RAM, consider [adding a node]({{< relref "/operate/rs/clusters/add-node" >}}). + - If all cluster nodes are utilizing more than 70% of available RAM, strongly consider [adding a node]({{< relref "/operate/rs/clusters/add-node" >}}). - Do not run any other memory-intensive processes on the Redis Enterprise Software node. -4. Network: +4.
Persistent storage: + + - If no databases on the cluster have [persistence]({{< relref "/operate/rs/installing-upgrading/install/plan-deployment/persistent-ephemeral-storage" >}}) enabled, minimum persistent storage is RAM x 1.1 and the recommended persistent storage is RAM x 2. Persistent storage is essential because Redis Enterprise also uses it to maintain the cluster and database health, configurations, recovery procedures, and more. + +5. Network: - Only static IP addresses are supported to ensure nodes remain part of the cluster after a reboot. + diff --git a/content/integrate/redis-data-integration/data-pipelines/prepare-dbs/sql-server.md b/content/integrate/redis-data-integration/data-pipelines/prepare-dbs/sql-server.md index 8f1123e25..3723aa0c8 100644 --- a/content/integrate/redis-data-integration/data-pipelines/prepare-dbs/sql-server.md +++ b/content/integrate/redis-data-integration/data-pipelines/prepare-dbs/sql-server.md @@ -16,66 +16,82 @@ type: integration weight: 2 --- -To prepare your SQL Server database for Debezium, you must first run a query to -enable CDC globally and then separately enable CDC for each table you want to +To prepare your SQL Server database for Debezium, you must first create a dedicated Debezium user, +run a script to enable CDC globally, and then separately enable CDC for each table you want to capture. You need administrator privileges to do this. Once you enable CDC, it captures all of the INSERT, UPDATE, and DELETE operations -on your chosen tables. The Debezium connector can then emit these events to -[Kafka topics](https://kafka.apache.org/intro#intro_concepts_and_terms). +on your chosen tables. The Debezium connector can then emit these events to RDI. -## 1. Enable CDC on the database +## 1. Create a Debezium user -There are two system stored procedures to enable CDC (you need -administrator privileges to run these). 
Use `sys.sp_cdc_enable_db` -to enable CDC for the whole database and then -You can run the procedure with SQL Server Management Studio or with -Transact-SQL. - -Before running the procedure, ensure that: - -- You are a member of the `sysadmin` fixed server role for the SQL Server. -- You are a `db_owner` of the database. -- The SQL Server Agent is running. - -Then, follow the steps below to enable CDC: +It is strongly recommended to create a dedicated Debezium user for the connection between RDI +and the source database. When using an existing user, ensure that the required +permissions are granted and that the user is added to the CDC role. -1. From the **View** menu in SQL Server Management Studio, click **Template Explorer**. +1. Create a user with the Transact-SQL below: -1. In the Template Browser, expand **SQL Server Templates**. + ```sql + USE master + GO + CREATE LOGIN MyUser WITH PASSWORD = 'My_Password' + GO + USE MyDB + GO + CREATE USER MyUser FOR LOGIN MyUser + GO + ``` -1. Expand **Change Data Capture > Configuration** and then click **Enable Database for CDC**. + Replace `MyUser`, `My_Password` and `MyDB` with your chosen values. -1. In the template, replace the database name in the `USE` statement with the name of the - database where you want to enable CDC. For example, if your database was called - `myDB`, the template would be: +1. Grant required permissions: ```sql + USE master + GO + GRANT VIEW SERVER STATE TO MyUser + GO USE MyDB GO - EXEC sys.sp_cdc_enable_db + EXEC sp_addrolemember N'db_datareader', N'MyUser' GO ``` -1. Run the stored procedure `sys.sp_cdc_enable_db` to enable CDC for the database. +## 2. Enable CDC on the database -When you enable CDC for the database, it creates a schema called `cdc` and also -a CDC user, metadata tables, and other system objects. +There are two system stored procedures to enable CDC (you need +administrator privileges to run these). 
Use `sys.sp_cdc_enable_db` +to enable CDC for the whole database and then `sys.sp_cdc_enable_table` to enable CDC for individual tables. + +Before running the procedures, ensure that: -- You are a member of the `sysadmin` fixed server role for the SQL Server. -- You are a `db_owner` of the database. -- The SQL Server Agent is running. + +Then, assuming your database is called `MyDB`, run the script below to enable CDC: -## 2. Enable CDC for the tables you want to capture +```sql +USE MyDB +GO +EXEC sys.sp_cdc_enable_db +GO +``` -You must also enable CDC on the tables you want Debezium to capture using the -following steps (again, you need administrator privileges for this): +{{< note >}}For SQL Server on AWS RDS, you must use a different stored procedure: +```sql +EXEC msdb.dbo.rds_cdc_enable_db 'MyDB' +GO +``` +{{< /note >}} + +When you enable CDC for the database, it creates a schema called `cdc` and also +a CDC user, metadata tables, and other system objects. -1. With the **Change Data Capture > Configuration** foldout still open in the - Template Explorer, select **Enable Table Specifying Filegroup Option**. +## 3. Enable CDC for the tables you want to capture -1. In the template, replace the table name in the USE statement with the name of - the table you want to capture. For example, if your table was called `MyTable` - then the template would look like the following: +1.
You must also enable CDC on the tables you want Debezium to capture using the +following commands (again, you need administrator privileges for this): ```sql USE MyDB @@ -85,38 +101,34 @@ following steps (again, you need administrator privileges for this): @source_schema = N'dbo', @source_name = N'MyTable', @role_name = N'MyRole', - @filegroup_name = N'MyDB_CT', @supports_net_changes = 0 GO ``` + + Repeat this for every table you want to capture. + + {{< note >}}The value for `@role_name` can’t be a fixed database role, such as `db_datareader`. + Specifying a new name will create a corresponding database role that has full access to the + captured change data. + {{< /note >}} -1. Run the stored procedure `sys.sp_cdc_enable_table` to enable CDC for - the table. +1. Add the Debezium user to the CDC role: -1. Repeat steps 1 to 3 for every table you want to capture. + ```sql + USE MyDB + GO + EXEC sp_addrolemember N'MyRole', N'MyUser' + GO + ``` -## 3. Check that you have access to the CDC table +## 4. Check that you have access to the CDC table You can use another stored procedure `sys.sp_cdc_help_change_data_capture` to query the CDC information for the database and check you have enabled -it correctly. Before doing this, check that: - -* You have `SELECT` permission on all of the captured columns of the capture instance. - If you are a member of the `db_owner` database role then you can view information for - all of the defined capture instances. -* You are a member of any gating roles that are defined for the table that the query includes. - -Follow the steps below to run `sys.sp_cdc_help_change_data_capture`: - -1. From the **View** menu in SQL Server Management Studio, click **Object Explorer**. - -1. From the Object Explorer, expand **Databases**, and then expand your database - object, for example, `MyDB`. - -1. Expand **Programmability > Stored Procedures > System Stored Procedures**. +it correctly. 
To do this, connect as the Debezium user you created previously (`MyUser`). 1. Run the `sys.sp_cdc_help_change_data_capture` stored procedure to query - the table. For example, if your database was called `MyDB` then you would + the CDC configuration. For example, if your database was called `MyDB` then you would run the following: ```sql @@ -131,14 +143,31 @@ Follow the steps below to run `sys.sp_cdc_help_change_data_capture`: access. If the result is empty then you should check that you have privileges to access both the capture instance and the CDC tables. -## SQL Server on Azure +### Troubleshooting -You can also use the Debezium SQL Server connector with SQL Server on Azure. -See Microsoft's guide to -[configuring SQL Server on Azure for CDC with Debezium](https://learn.microsoft.com/en-us/samples/azure-samples/azure-sql-db-change-stream-debezium/azure-sql%2D%2Dsql-server-change-stream-with-debezium/) -for more information. +If no CDC is happening then it might mean that SQL Server Agent is down. You can check for this using the SQL query shown below: + +```sql +IF EXISTS (SELECT 1 + FROM master.dbo.sysprocesses + WHERE program_name = N'SQLAgent - Generic Refresher') +BEGIN + SELECT @@SERVERNAME AS 'InstanceName', 1 AS 'SQLServerAgentRunning' +END +ELSE +BEGIN + SELECT @@SERVERNAME AS 'InstanceName', 0 AS 'SQLServerAgentRunning' +END +``` + +If the query returns a result of 0, you need to start SQL Server Agent using the following command: -### SQL Server capture job agent configuration parameters +```sql +EXEC xp_servicecontrol N'START',N'SQLServerAGENT'; +GO +``` + +## SQL Server capture job agent configuration parameters In SQL Server, the parameters that control the behavior of the capture job agent are defined in the SQL Server table `msdb.dbo.cdc_jobs`. If you experience performance @@ -169,6 +198,13 @@ of the Debezium SQL Server connector: See the SQL Server documentation for more information about capture agent parameters.
+## SQL Server on Azure + +You can also use the Debezium SQL Server connector with SQL Server on Azure. +See Microsoft's guide to +[configuring SQL Server on Azure for CDC with Debezium](https://learn.microsoft.com/en-us/samples/azure-samples/azure-sql-db-change-stream-debezium/azure-sql%2D%2Dsql-server-change-stream-with-debezium/) +for more information. + ## Handling changes to the schema RDI can't adapt automatically when you change the schema of a CDC table in SQL Server. For example, @@ -186,19 +222,28 @@ documentation for further details. 1. Create a new capture table for the updated source table by running the `sys.sp_cdc_enable_table` stored procedure with a new, unique value for the parameter `@capture_instance`. For example, if the old value - was `dbo_customers`, you could replace it with `dbo_customers_v2`: + was `dbo_MyTable`, you could replace it with `dbo_MyTable_v2` (you can see the existing values by running + stored procedure `sys.sp_cdc_help_change_data_capture`): ```sql - EXEC sys.sp_cdc_enable_table @source_schema = 'dbo', @source_name = 'customers', @role_name = NULL, @supports_net_changes = 0, @capture_instance = 'dbo_customers_v2'; + EXEC sys.sp_cdc_enable_table + @source_schema = N'dbo', + @source_name = N'MyTable', + @role_name = N'MyRole', + @capture_instance = N'dbo_MyTable_v2', + @supports_net_changes = 0 GO ``` 1. 
When Debezium starts streaming from the new capture table, drop the old capture table by running the `sys.sp_cdc_disable_table` stored procedure with the parameter `@capture_instance` set to the old - capture instance name, `dbo_customers`: + capture instance name, `dbo_MyTable`: ```sql - EXEC sys.sp_cdc_disable_table @source_schema = 'dbo', @source_name = 'dbo_customers', @capture_instance = 'dbo_customers'; + EXEC sys.sp_cdc_disable_table + @source_schema = N'dbo', + @source_name = N'MyTable', + @capture_instance = N'dbo_MyTable' GO ``` diff --git a/content/operate/oss_and_stack/_index.md b/content/operate/oss_and_stack/_index.md index a19533f46..46ec8b500 100644 --- a/content/operate/oss_and_stack/_index.md +++ b/content/operate/oss_and_stack/_index.md @@ -2,4 +2,12 @@ title: Redis Community Edition and Stack description: Operate Redis Community Edition and Redis Stack. Redis OSS was renamed Redis Community Edition (CE) with the v7.4 release. linkTitle: Redis Community Edition and Stack ---- \ No newline at end of file + +categories: +- docs +- operate +- oss +- stack +--- + + diff --git a/content/operate/oss_and_stack/reference/cluster-spec.md b/content/operate/oss_and_stack/reference/cluster-spec.md index 0f51cb0b6..a61df8846 100644 --- a/content/operate/oss_and_stack/reference/cluster-spec.md +++ b/content/operate/oss_and_stack/reference/cluster-spec.md @@ -198,7 +198,7 @@ Then instead of hashing the key, only what is between the first occurrence of `{ Examples: * The two keys `{user1000}.following` and `{user1000}.followers` will hash to the same hash slot since only the substring `user1000` will be hashed in order to compute the hash slot. -* For the key `foo{}{bar}` the whole key will be hashed as usually since the first occurrence of `{` is followed by `}` on the right without characters in the middle. 
+* For the key `foo{}{bar}` the whole key will be hashed as usual since the first occurrence of `{` is followed by `}` on the right without characters in the middle. * For the key `foo{{bar}}zap` the substring `{bar` will be hashed, because it is the substring between the first occurrence of `{` and the first occurrence of `}` on its right. * For the key `foo{bar}{zap}` the substring `bar` will be hashed, since the algorithm stops at the first valid or invalid (without bytes inside) match of `{` and `}`. * What follows from the algorithm is that if the key starts with `{}`, it is guaranteed to be hashed as a whole. This is useful when using binary data as key names. diff --git a/content/operate/rc/changelog/may-2024.md b/content/operate/rc/changelog/may-2024.md index e8a71f0f4..d4fc70172 100644 --- a/content/operate/rc/changelog/may-2024.md +++ b/content/operate/rc/changelog/may-2024.md @@ -42,7 +42,7 @@ For Redis Cloud Essentials, the plan size refers to the full memory limit, not t For Redis Cloud Pro, you define your dataset size when you create the database, and we calculate your total memory limit automatically based on the features you choose. -See [Dataset size]({{< relref "/operate/rc/databases/configuration/clustering#dataset-size" >}}) for more information. +See [Dataset size]({{< relref "/operate/rc/databases/configuration/sizing#dataset-size" >}}) for more information. ## Deprecations diff --git a/content/operate/rc/databases/configuration/clustering.md b/content/operate/rc/databases/configuration/clustering.md index ba2aea7e4..d9576c41f 100644 --- a/content/operate/rc/databases/configuration/clustering.md +++ b/content/operate/rc/databases/configuration/clustering.md @@ -165,44 +165,11 @@ their order to suit your application's requirements. - **PCRE_ANCHORED:** the pattern is constrained to match only at the start of the string which is being searched. 
-## Dataset size {#dataset-size} - -The dataset size of a database is a part of the full memory limit for the database. The memory limit represents the maximum amount of memory for the database, which includes data values, keys, module data, and overhead for specific features. High availability features, such as replication and Active-Active, increase memory consumption, so in those cases your dataset size and memory limit will be different. - -For Redis Cloud Essentials, the plan size refers to the full memory limit, not the dataset size. Both the total memory limit and dataset size are listed under **Database details** when you create an Essentials database. - -For Redis Cloud Pro, you define your dataset size when you create the database, and we calculate your total memory limit based on the features you choose. - -Here are some general guidelines: - -- Memory limit represents an upper limit. You cannot store more data than the memory limit. Depending on your other selections, available memory for data may be less than expected. - -- [Replication]({{< relref "/operate/rc/databases/configuration/high-availability" >}}) doubles memory consumption; that is, 512 MB of data requires at least 1 GB of memory limit when replication is enabled. This affects both Redis Cloud Pro and Redis Cloud Essentials. For example, if you subscribe to a 1 GB Essentials plan, Redis will allocate 512 MB for your dataset and the other 512 MB for replication. - -- [Active-Active]({{< relref "/operate/rc/databases/configuration/active-active-redis" >}}) also doubles memory consumption and the effect is cumulative with replication's impact. Since Active-Active requires replication to be turned on, the memory limit impact can be as large as four times (4x) the original data size. - -- [Advanced capabilities]({{< relref "/operate/rc/databases/configuration/advanced-capabilities" >}}) also consume memory. For search databases, consider index size when you size your database. 
- -Memory limits in Redis Cloud are subject to the same considerations as Redis Enterprise Software; to learn more, see [Database memory limits]({{< relref "/operate/rs/databases/memory-performance/memory-limit" >}}). - -## Throughput - -Throughput is the amount of operations a database can handle over a certain period of time. For Redis Cloud databases, throughput is defined in operations per second (ops/sec). - -For a Redis Cloud Pro subscription, you define throughput for a database when you create it. For a Redis Cloud Essentials subscription, your maximum throughput depends on your plan. - -We assume a typical workload that includes a different mix of commands and an average key and value size of 1KB. Therefore, your actual throughput may be higher or lower than the throughput you set when you create your database. The following properties can affect your database's throughput: -- **Command complexity**: O(N) and O(log(N)) commands will take more time than O(1) commands, and will affect throughput accordingly. -- **Key and value sizing**: If your database's keys and values are very large, your actual throughput may be lower than expected. If the keys and values are smaller than the typical workload, the actual throughput might be higher than expected. -- **Replication**: Using [multi-zone replication]({{< relref "/operate/rc/databases/configuration/high-availability" >}}) affects throughput as each write operation is executed asynchronously in each zone. -- **Security**: Some security options, such as [transport layer security]({{< relref "/operate/rc/security/database-security/tls-ssl" >}}), may affect throughput. -- **Number of client connections**: The number of client connections affects throughput. Increasing or decreasing the number of client connections can result in higher or lower throughput. - ## Cluster API {#oss-cluster-api} {{< embed-md "oss-cluster-api-intro.md" >}} -The Cluster API is only supported on Redis Cloud Pro databases. 
You can enable it in the Scalability section of the configuration screen. +The Cluster API is only supported on Redis Cloud Pro databases. You can enable it in the Performance section of the configuration screen. The Redis Cluster API is supported only when a database uses the [standard hashing policy](#standard-hashing-policy) and does not use Search and Query or Time Series advanced capabilities. diff --git a/content/operate/rc/databases/configuration/sizing.md b/content/operate/rc/databases/configuration/sizing.md new file mode 100644 index 000000000..f62ba578c --- /dev/null +++ b/content/operate/rc/databases/configuration/sizing.md @@ -0,0 +1,81 @@ +--- +Title: Size a Redis Cloud database +alwaysopen: false +categories: +- docs +- operate +- rc +description: Describes sizing considerations for your Redis Cloud database, including throughput and dataset size. +linkTitle: Sizing +weight: $weight +--- + +## Dataset size {#dataset-size} + +The dataset size of a database is a part of the full memory limit for the database. The memory limit represents the maximum amount of memory for the database, which includes data values, keys, module data, and overhead for specific features. High availability features, such as replication and Active-Active, increase memory consumption, so your dataset size and memory limit will be different. + +For Redis Cloud Essentials, the plan size refers to the full memory limit, not the dataset size. Both the total memory limit and dataset size are listed under **Database details** when you create an Essentials database. + +For Redis Cloud Pro, you define your dataset size when you create the database, and we calculate your total memory limit based on the features you choose. + +Here are some general guidelines: + +- Memory limit represents an upper limit. You cannot store more data than the memory limit. Depending on your other selections, available memory for data may be less than expected. 
+ +- [Replication]({{< relref "/operate/rc/databases/configuration/high-availability" >}}) doubles memory consumption; that is, 512 MB of data requires at least 1 GB of memory limit when replication is enabled. This affects both Redis Cloud Pro and Redis Cloud Essentials. For example, if you subscribe to a 1 GB Essentials plan, Redis will allocate 512 MB for your dataset and the other 512 MB for replication. + +- [Active-Active]({{< relref "/operate/rc/databases/configuration/active-active-redis" >}}) also doubles memory consumption and the effect is cumulative with replication's impact. Since Active-Active requires replication to be turned on, the memory limit impact can be as large as four times (4x) the original data size. + +- [Advanced capabilities]({{< relref "/operate/rc/databases/configuration/advanced-capabilities" >}}) also consume memory. For search databases, consider index size when you size your database. See [Search and query sizing]({{< relref "/operate/rc/databases/configuration/advanced-capabilities#search-and-query-sizing" >}}) for more info. + +Memory limits in Redis Cloud are subject to the same considerations as Redis Enterprise Software; to learn more, see [Database memory limits]({{< relref "/operate/rs/databases/memory-performance/memory-limit" >}}). + +## Throughput + +Throughput is the number of operations a database can handle over a certain period of time. For Redis Cloud databases, throughput is defined in operations per second (ops/sec). + +For a Redis Cloud Pro subscription, you define throughput for a database when you create it. For a Redis Cloud Essentials subscription, your maximum throughput depends on your plan. + +We use this setting to guide the allocation of compute power and network bandwidth, ensuring your database can handle the expected workload. However, the throughput specified is not guaranteed - actual throughput may be higher or lower depending on your workload and database configuration. 
+ +### Throughput factors and variability + +Some factors that can affect throughput include: +- **Request size**: Smaller requests (under 3KB) consume less network bandwidth and may result in more operations per second than requested, while larger requests may result in fewer operations per second. +- **Command complexity**: Simple commands, like `GET` and `SET`, are faster and require fewer resources, whereas more complex commands involve more processing time and can reduce throughput. See the [Command list]({{< relref "/commands" >}}) to see which commands are more complex than others. +- **Replication**: Using [multi-zone replication]({{< relref "/operate/rc/databases/configuration/high-availability" >}}) affects throughput as each write operation is executed asynchronously in each zone. +- **Security**: Some security options, such as [transport layer security]({{< relref "/operate/rc/security/database-security/tls-ssl" >}}), may affect throughput. +- **Number of client connections**: The number of client connections affects throughput. Increasing or decreasing the number of client connections can result in higher or lower throughput. + +### Optimize throughput + +Here are some things to keep in mind for optimizing throughput: +- Optimize capacity planning and sizing of your Redis Cloud databases to meet your app performance requirements. +- Benchmark your app to understand what latency expectations are required, and adjust throughput accordingly. +- Test and monitor your app's performance and adjust the set ops/sec based on how it performs in real-world conditions. +- If your average request size is larger than 3KB, consider setting your throughput higher than expected. +- Track the slow logs using the [`SLOWLOG` command]({{< relref "/commands/slowlog" >}}) or the **Slowlog** tab on the [database screen]({{< relref "/operate/rc/databases/view-edit-database" >}}).
+- Use [pipelining]({{< relref "/develop/use/pipelining" >}}) and [concurrent connections]({{< relref "/develop/reference/clients" >}}) effectively to optimize throughput and latency. +- Search databases have their own throughput requirements. See [Search and query sizing]({{< relref "/operate/rc/databases/configuration/advanced-capabilities#search-and-query-sizing" >}}) for more info. + +### Frequently asked questions + +**Can my workload exceed the configured throughput?** + +Yes, many workloads perform better than expected, especially with optimized configurations and ideal conditions. + +**Why is my application getting less throughput than what I set?** + +Factors like high connection counts, complex commands, large payloads, and network limitations can affect throughput. Test and adjust based on your application’s needs. + +**How do I know the optimal throughput for my application?** + +Start with the expected ops/sec. Most of the time, it’s more than enough. For an average request size of less than 3KB, we suggest specifying lower ops/sec than expected to reduce costs. For request sizes higher than 3 KB, we suggest specifying higher ops/sec than your target. Benchmarking your application under production-like conditions will help you find the best configuration. + +**How does network bandwidth impact throughput?** + +Insufficient bandwidth can bottleneck performance, especially with large request sizes. + +**What is the expected latency for my application?** + +Latency expectations vary by use case. Some applications tolerate milliseconds, while others require sub-millisecond performance. Benchmark your application to understand its latency profile. 
\ No newline at end of file diff --git a/content/operate/rc/databases/create-database/create-active-active-database.md b/content/operate/rc/databases/create-database/create-active-active-database.md index c6ffaf5c2..2b968b3f7 100644 --- a/content/operate/rc/databases/create-database/create-active-active-database.md +++ b/content/operate/rc/databases/create-database/create-active-active-database.md @@ -189,7 +189,7 @@ Use the **Database list** to check the status of your databases. - [Create a Pro database with a new subscription]({{< relref "/operate/rc/databases/create-database/create-pro-database-new" >}}) - [Active-Active Redis]({{< relref "/operate/rc/databases/configuration/active-active-redis" >}}) - [Develop applications with Active-Active databases]({{< relref "/operate/rs/databases/active-active/develop/_index.md" >}}) -- Database [memory limit]({{< relref "/operate/rc/databases/configuration/clustering#dataset-size" >}}) +- Database [memory limit]({{< relref "/operate/rc/databases/configuration/sizing#dataset-size" >}}) - Redis Cloud [subscription plans]({{< relref "/operate/rc/subscriptions/" >}}) - [Redis Cloud pricing](https://redis.io/pricing/#monthly) diff --git a/content/operate/rc/databases/create-database/create-pro-database-existing.md b/content/operate/rc/databases/create-database/create-pro-database-existing.md index 1c31d24d2..f4447f8c8 100644 --- a/content/operate/rc/databases/create-database/create-pro-database-existing.md +++ b/content/operate/rc/databases/create-database/create-pro-database-existing.md @@ -55,16 +55,16 @@ The available settings vary according to your subscription plan: | **Query performance factor** | *(Search and query databases on Redis 7.2 or later only)* Adds additional compute power to process your query and vector search workloads and boost your queries per second. See [Search and query sizing]({{< relref "/operate/rc/databases/configuration/advanced-capabilities#search-and-query-sizing" >}}) for more information. 
| | **Supported Protocol(s)** | Choose between RESP2 and RESP3 _(Redis 7.2 only)_. See [Redis serialization protocol]({{< relref "/develop/reference/protocol-spec" >}}#resp-versions) for details | -## Scalability section +## Performance section -The **Scalability** section lets you manage the maximum size, throughput, and hashing policy for a database. +The **Performance** section lets you manage the maximum size, throughput, and hashing policy for a database. -{{Use the Scalability section to control the size, throughput, and hashing policy for a database.}} +{{Use the Performance section to control the size, throughput, and hashing policy for a database.}} | Setting name | Description | |:--------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| **Dataset size** | Maximum size (in GB) for your dataset. See [Dataset size]({{< relref "/operate/rc/databases/configuration/clustering#dataset-size" >}}) for sizing considerations.
Databases with Search and query have specific size requirements, see [Search and query sizing]({{< relref "/operate/rc/databases/configuration/advanced-capabilities#search-and-query-sizing" >}}) for more information. | -| **Throughput** | Defines throughput in terms of maximum operations per second for the database. See [Throughput]({{< relref "/operate/rc/databases/configuration/clustering#throughput" >}}) for more info.
Databases with Search and query have specific throughput requirements, see [Search and query sizing]({{< relref "/operate/rc/databases/configuration/advanced-capabilities#search-and-query-sizing" >}}) for more information. | +| **Dataset size** | Maximum size (in GB) for your dataset. See [Dataset size]({{< relref "/operate/rc/databases/configuration/sizing#dataset-size" >}}) for sizing considerations.
Databases with Search and query have specific size requirements, see [Search and query sizing]({{< relref "/operate/rc/databases/configuration/advanced-capabilities#search-and-query-sizing" >}}) for more information. | +| **Throughput** | Defines throughput in terms of maximum operations per second for the database. See [Throughput]({{< relref "/operate/rc/databases/configuration/sizing#throughput" >}}) for more info.
Databases with Search and query have specific throughput requirements, see [Search and query sizing]({{< relref "/operate/rc/databases/configuration/advanced-capabilities#search-and-query-sizing" >}}) for more information. | | **High availability** | Replicates your data across multiple nodes, as allowed by your subscription plan. See [High availability]({{< relref "/operate/rc/databases/configuration/high-availability" >}}) for more info| | **Hashing policy** | Defines the [hashing policy]({{< relref "/operate/rc/databases/configuration/clustering#manage-the-hashing-policy" >}}). | | **OSS Cluster API** | Enables the [Cluster API]({{< relref "/operate/rc/databases/configuration/clustering#oss-cluster-api" >}}) for a database

When this option is enabled, you cannot define a custom hashing policy. | diff --git a/content/operate/rc/databases/create-database/create-pro-database-new.md b/content/operate/rc/databases/create-database/create-pro-database-new.md index f3995c233..9ba01bd10 100644 --- a/content/operate/rc/databases/create-database/create-pro-database-new.md +++ b/content/operate/rc/databases/create-database/create-pro-database-new.md @@ -54,7 +54,7 @@ If you choose to create your database with Easy create: | Database setting | Description | |:---------|:-----------| | **Dataset size (GB)** | The amount of data for your dataset. Specify small sizes as decimals of 1.0 GB; example: `0.1` GB (minimum). We calculate the total memory limit for you based on the other settings you choose for your database.
Databases with Search and query have specific sizing requirements, see [Search and query sizing]({{< relref "/operate/rc/databases/configuration/advanced-capabilities#search-and-query-sizing" >}}) for more information. | - | **Throughput** | Identifies maximum throughput for the database, which is specified in terms of operations per second (**Ops/sec**). See [Throughput]({{< relref "/operate/rc/databases/configuration/clustering#throughput" >}}) for more information.
Databases with Search and query have specific throughput requirements, see [Search and query sizing]({{< relref "/operate/rc/databases/configuration/advanced-capabilities#search-and-query-sizing" >}}) for more information. | + | **Throughput** | Identifies maximum throughput for the database, which is specified in terms of operations per second (**Ops/sec**). See [Throughput]({{< relref "/operate/rc/databases/configuration/sizing#throughput" >}}) for more information.
Databases with Search and query have specific throughput requirements, see [Search and query sizing]({{< relref "/operate/rc/databases/configuration/advanced-capabilities#search-and-query-sizing" >}}) for more information. | | **High Availability** | Indicates whether a replica copy of the database is maintained in case the primary database becomes unavailable. (Warning: doubles memory consumption). See [High Availability]({{< relref "/operate/rc/databases/configuration/high-availability" >}}). | 1. Select **View all settings** to review the database settings that we selected for you based on your use case. diff --git a/content/operate/rc/databases/view-edit-database.md b/content/operate/rc/databases/view-edit-database.md index 87f342417..883c21e0f 100644 --- a/content/operate/rc/databases/view-edit-database.md +++ b/content/operate/rc/databases/view-edit-database.md @@ -56,24 +56,22 @@ The available settings vary according to your plan, cloud provider, and design c | **Supported Protocol(s)** | Shows which version of RESP the database uses. See [Redis serialization protocol]({{< relref "/develop/reference/protocol-spec" >}}#resp-versions) for details | | **Advanced Capabilites** | This setting appears when an [advanced capability]({{< relref "/operate/rc/databases/configuration/advanced-capabilities" >}}) is enabled for a database | -### Scalability section +### Performance section -The **Scalability** section describes the memory size, throughput, and hashing policy for a database. +The **Performance** section describes the memory size, throughput, and hashing policy for a database. -{{Use the Scalability section to control the size, throughput, and hashing policy for a database.}} - -The **Scalability** section is primarily for Redis Cloud Pro plans. Redis Cloud Essentials plans have options for memory limit and memory used. 
+{{Use the Performance section to control the size, throughput, and hashing policy for a database.}} | Setting name |Description| |:----------------------|:----------| -| **Dataset size** | Maximum size (in GB) for your dataset. | +| **Dataset size** | Maximum size (in GB) for your dataset. See [Dataset size]({{< relref "/operate/rc/databases/configuration/sizing#dataset-size" >}}). | +| **Throughput** | Defines [throughput]({{< relref "/operate/rc/databases/configuration/sizing#throughput" >}}) in terms of maximum operations per second for the database (_Redis Cloud Pro only_). | | **Memory used** | Memory currently used for your database. | -| **Throughput** | Defines throughput in terms of maximum operations per second for the database | | **High availability** | Replicates your data across multiple nodes; [available options]({{< relref "/operate/rc/databases/configuration/high-availability" >}}) depend on your plan type | -| **Hashing policy** | Defines the [hashing policy]({{< relref "/operate/rc/databases/configuration/clustering#manage-the-hashing-policy" >}}). | -| **OSS Cluster API** | Enables the [Cluster API]({{< relref "/operate/rc/databases/create-database#oss-cluster-api" >}}) for a database.

When this option is enabled, you cannot define a custom hashing policy.| +| **Hashing policy** | Defines the [hashing policy]({{< relref "/operate/rc/databases/configuration/clustering#manage-the-hashing-policy" >}}) (_Redis Cloud Pro only_). | +| **OSS Cluster API** | Enables the [Cluster API]({{< relref "/operate/rc/databases/configuration/clustering#oss-cluster-api" >}}) for a database (_Redis Cloud Pro only_).

When this option is enabled, you cannot define a custom hashing policy.| -To learn more about these settings and when to use them, see [Database clustering]({{< relref "/operate/rc/databases/configuration/clustering" >}}). +To learn more about these settings and when to use them, see [Sizing]({{< relref "/operate/rc/databases/configuration/sizing" >}}) and [Database clustering]({{< relref "/operate/rc/databases/configuration/clustering" >}}). ### Durability section @@ -200,12 +198,12 @@ Here's what you can change: | General | Database name || | | Supported protocol(s) || | | Tags || -| Scalability | Memory limit | | +| Performance | Dataset size | | +| | High-availability | _Paid plans only_ | | | Throughput | _Pro plans only_ | | | Hashing policy | _Pro plans only_ | | | OSS Cluster API | _Pro plans only_ | -| Durability | High-availability | _Paid plans only_ | -| | Data persistence | _Paid plans only_ | +| Durability | Data persistence | _Paid plans only_ | | | Data eviction policy | | | | Remote backup | _Paid plans only_ | | | Active-Passive Redis | _Pro plans only_ | diff --git a/content/operate/redisinsight/_index.md b/content/operate/redisinsight/_index.md index 61d124e7d..34ea2d790 100644 --- a/content/operate/redisinsight/_index.md +++ b/content/operate/redisinsight/_index.md @@ -2,6 +2,11 @@ title: Redis Insight description: Install and manage Redis Insight linkTitle: Redis Insight +categories: +- docs +- operate +- redisinsight --- -For information on using Redis Insight, see [these pages]({{< relref "/develop/tools/insight" >}}). \ No newline at end of file +For information on using Redis Insight, see [these pages]({{< relref "/develop/tools/insight" >}}). 
+
diff --git a/content/operate/rs/installing-upgrading/upgrading/_index.md b/content/operate/rs/installing-upgrading/upgrading/_index.md
index 45fe3aff4..18f42098b 100644
--- a/content/operate/rs/installing-upgrading/upgrading/_index.md
+++ b/content/operate/rs/installing-upgrading/upgrading/_index.md
@@ -1,19 +1,42 @@
 ---
-Title: Upgrade an existing Redis Enterprise Software deployment
+Title: Upgrade an existing Redis Software deployment
 alwaysopen: false
 categories:
 - docs
 - operate
 - rs
-description: null
+description: How to upgrade a cluster's Redis Software version, database version, and operating system.
 hideListLinks: true
 linkTitle: Upgrade
 weight: 60
 ---
-To upgrade Redis Enterprise Software:
-1. Verify appropriate [network ports]({{< relref "/operate/rs/networking/port-configurations.md" >}}) are either open or used by Redis Enterprise Software.
+## Upgrade Redis Software
-1. [Upgrade the software on all nodes of the cluster.]({{< relref "/operate/rs/installing-upgrading/upgrading/upgrade-cluster" >}})
+To upgrade Redis Software:
-2. _(Optional)_ [Upgrade each database]({{< relref "/operate/rs/installing-upgrading/upgrading/upgrade-database" >}}) in the cluster or [upgrade an Active-Active database]({{< relref "/operate/rs/installing-upgrading/upgrading/upgrade-active-active" >}}) to enable new features and important fixes.
+1. Verify appropriate [network ports]({{< relref "/operate/rs/networking/port-configurations.md" >}}) are either open or used by Redis Software.
+
+1. Review the [prerequisites]({{< relref "/operate/rs/installing-upgrading/upgrading/upgrade-cluster#upgrade-prerequisites" >}}).
+
+1. Upgrade the software on all nodes of the cluster using one of the following methods:
+
+   - [In-place upgrade]({{< relref "/operate/rs/installing-upgrading/upgrading/upgrade-cluster#in-place-upgrade" >}}) - Directly upgrade Redis Software on each node in the cluster. Although this method is simpler than the rolling upgrade method, it might cause brief service interruptions as each node is upgraded.
+
+   - [Rolling upgrade]({{< relref "/operate/rs/installing-upgrading/upgrading/upgrade-cluster#rolling-upgrade" >}}) - Minimize downtime by adding new nodes with an updated Redis Software version to the cluster, one at a time, while keeping the rest of the cluster operational. This method is recommended for production environments that require continuous availability.
+
+## Upgrade Redis database
+
+[Upgrade each database]({{< relref "/operate/rs/installing-upgrading/upgrading/upgrade-database" >}}) in the cluster or [upgrade an Active-Active database]({{< relref "/operate/rs/installing-upgrading/upgrading/upgrade-active-active" >}}) to enable new features and important fixes.
+
+## Upgrade operating system
+
+To upgrade the cluster's operating system:
+
+1. Review the [prerequisites]({{< relref "/operate/rs/installing-upgrading/upgrading/upgrade-os#prerequisites" >}}).
+
+2. Use one of the following rolling upgrade methods:
+
+   - [Extra node method]({{< relref "/operate/rs/installing-upgrading/upgrading/upgrade-os#extra-node-upgrade" >}}) - Recommended if you have additional resources available.
+
+   - [Replace node method]({{< relref "/operate/rs/installing-upgrading/upgrading/upgrade-os#replace-node-upgrade" >}}) - Recommended if you cannot temporarily allocate additional resources.
\ No newline at end of file
diff --git a/content/operate/rs/installing-upgrading/upgrading/upgrade-cluster.md b/content/operate/rs/installing-upgrading/upgrading/upgrade-cluster.md
index 51c4ff0fb..2cf2bd693 100644
--- a/content/operate/rs/installing-upgrading/upgrading/upgrade-cluster.md
+++ b/content/operate/rs/installing-upgrading/upgrading/upgrade-cluster.md
@@ -1,17 +1,25 @@
 ---
-Title: Upgrade a Redis Enterprise Software cluster
+Title: Upgrade a Redis Software cluster
 alwaysopen: false
 categories:
 - docs
 - operate
 - rs
-description: Upgrade a Redis Enterprise Software cluster.
+description: Upgrade a cluster to a later version of Redis Software.
 linkTitle: Upgrade cluster
 toc: 'true'
 weight: 30
 tocEmbedHeaders: true
 ---
+Before you upgrade a cluster to a later Redis Software version, review the [supported upgrade paths](#supported-upgrade-paths) and [prerequisites](#upgrade-prerequisites).
+ +To upgrade a cluster's Redis Software version, use one of the following methods: + +- [In-place upgrade](#in-place-upgrade) - Directly upgrade Redis Software on each node in the cluster. Although this method is simpler than the rolling upgrade method, it might cause brief service interruptions as each node is upgraded. + +- [Rolling upgrade](#rolling-upgrade) - Minimize downtime by adding new nodes with an updated Redis Software version to the cluster, one at a time, while keeping the rest of the cluster operational. This method is recommended for production environments that require continuous availability. + {{}} See the [Redis Enterprise Software product lifecycle]({{}}) for more information about release numbers and the end-of-life schedule. @@ -38,9 +46,9 @@ Before upgrading a cluster: - [`GET /nodes/status`]({{< relref "/operate/rs/references/rest-api/requests/nodes/status#get-all-nodes-status" >}}) REST API request -## Upgrade cluster +## In-place upgrade -Starting with the primary (master) node, follow these steps for every node in the cluster. To ensure cluster availability, upgrade each node separately. +Starting with the primary node, follow these steps for every node in the cluster. To ensure cluster availability, upgrade each node separately. 1. Verify node operation with the following commands: @@ -49,6 +57,10 @@ Starting with the primary (master) node, follow these steps for every node in th $ rladmin status extra all ``` + {{}} +Do not proceed if any shard, node, or endpoint is not `OK`. + {{}} + 2. Download the Redis Enterprise Software installation package to the machine running the node from the Download Center on [https://cloud.redis.io](https://cloud.redis.io). 3. Extract the installation package: @@ -82,7 +94,89 @@ You cannot change the installation path or the user during the upgrade. If the Cluster Manager UI was open in a web browser during the upgrade, refresh the browser to reload the console. 
-After all nodes are upgraded, the cluster is fully upgraded. Certain features introduced in the new version of Redis Enterprise Software only become available after upgrading the entire cluster. +## Rolling upgrade + +To perform a rolling upgrade of the cluster, use one of the following methods: + +- [Extra node method](#extra-node-upgrade) - recommended if you have additional resources available + +- [Replace node method](#replace-node-upgrade) - recommended if you cannot temporarily allocate additional resources + +### Extra node upgrade method {#extra-node-upgrade} + +1. [Install a later version of Redis Software]({{< relref "/operate/rs/installing-upgrading/install/install-on-linux" >}}) on a new node. + +1. [Add the new node]({{< relref "/operate/rs/clusters/add-node" >}}) to the cluster. + +1. [Remove one node]({{< relref "/operate/rs/clusters/remove-node#remove-a-node" >}}) running the earlier Redis Software version from the cluster. + +1. Repeat the previous steps until all nodes with the earlier Redis Software version are removed. If the final node to remove from the cluster is the primary node, [demote it]({{}}) to a secondary node before you remove it. + +### Replace node upgrade method {#replace-node-upgrade} + +1. [Remove a node]({{< relref "/operate/rs/clusters/remove-node#remove-a-node" >}}) with the earlier Redis Software version from the cluster. + +1. Uninstall Redis Enterprise Software from the removed node: + + ```sh + sudo ./rl_uninstall.sh + ``` + +1. [Install a later version of Redis Software]({{< relref "/operate/rs/installing-upgrading/install/install-on-linux" >}}) on the removed node or a new node. + +1. [Add the new node]({{< relref "/operate/rs/clusters/add-node" >}}) to the cluster. 
+ + If you want to reuse the removed node's ID when you add the node to the cluster, run [`rladmin cluster join`]({{< relref "/operate/rs/references/cli-utilities/rladmin/cluster/join" >}}) with the `replace_node` flag: + + ```sh + rladmin cluster join nodes username password replace_node + ``` + +1. Verify node health: + + 1. Run `rlcheck` on all nodes: + + ```sh + rlcheck + ``` + + The output lists the result of each verification test: + + ```sh + ##### Welcome to Redis Enterprise Cluster settings verification utility #### + Running test: verify_bootstrap_status + PASS + ... + Running test: verify_encrypted_gossip + PASS + Summary: + ------- + ALL TESTS PASSED. + ``` + + For healthy nodes, the expected output is `ALL TESTS PASSED`. + + 1. Run [`rladmin status`]({{< relref "/operate/rs/references/cli-utilities/rladmin/status" >}}) on the new node: + + ```sh + rladmin status extra all + ``` + + The expected output is the `OK` status for the cluster, nodes, endpoints, and shards: + + ```sh + CLUSTER: + OK. Cluster master: 2 () + Cluster health: OK, [0, 0.0, 0.0] + failures/minute - avg1 0.00, avg15 0.00, avg60 0.00. + ... + ``` + +1. Repeat the previous steps until all nodes with the earlier Redis Software version are replaced. If the final node to remove from the cluster is the primary node, [demote it]({{}}) to a secondary node before you remove it. + +## After cluster upgrade + +After all nodes are upgraded, the cluster is fully upgraded. Certain features introduced in the new version of Redis Software only become available after upgrading the entire cluster. 
After upgrading from version 6.0.x to 6.2.x, restart `cnm_exec` on each cluster node to enable more advanced state machine handling capabilities: diff --git a/content/operate/rs/release-notes/rs-7-4-2-releases/rs-7-4-6-188.md b/content/operate/rs/release-notes/rs-7-4-2-releases/rs-7-4-6-188.md new file mode 100644 index 000000000..df03750bb --- /dev/null +++ b/content/operate/rs/release-notes/rs-7-4-2-releases/rs-7-4-6-188.md @@ -0,0 +1,324 @@ +--- +Title: Redis Enterprise Software release notes 7.4.6-188 (January 2025) +alwaysopen: false +categories: +- docs +- operate +- rs +compatibleOSSVersion: Redis 7.2.4 +description: Bug fix for a memory leak in the DMC proxy process. +linkTitle: 7.4.6-188 (January 2025) +weight: 63 +aliases: +--- + +This is a maintenance release for ​[​Redis Enterprise Software version 7.4.6](https://redis.io/downloads/#software). + +## Highlights + +This version offers: + +- A bug fix for a memory leak in the DMC proxy process + +## New in this release + +#### Redis module feature sets + +Redis Enterprise comes packaged with several modules. As of version 7.4.2, Redis Enterprise includes two feature sets, compatible with different Redis database versions. 
+ +Bundled Redis modules compatible with Redis database version 7.2: + +- [RediSearch 2.8]({{< relref "/operate/oss_and_stack/stack-with-enterprise/release-notes/redisearch/redisearch-2.8-release-notes.md" >}}) + +- [RedisJSON 2.6]({{< relref "/operate/oss_and_stack/stack-with-enterprise/release-notes/redisjson/redisjson-2.6-release-notes.md" >}}) + +- [RedisTimeSeries 1.10]({{< relref "/operate/oss_and_stack/stack-with-enterprise/release-notes/redistimeseries/redistimeseries-1.10-release-notes.md" >}}) + +- [RedisBloom 2.6]({{< relref "/operate/oss_and_stack/stack-with-enterprise/release-notes/redisbloom/redisbloom-2.6-release-notes.md" >}}) + +- [RedisGears 2.0 preview](https://github.com/RedisGears/RedisGears/releases/tag/v2.0.20-m21): The RedisGears preview will not be promoted to GA and will be removed in a future release. + +Bundled Redis modules compatible with Redis database versions 6.0 and 6.2: + +- [RediSearch 2.6]({{< relref "/operate/oss_and_stack/stack-with-enterprise/release-notes/redisearch/redisearch-2.6-release-notes.md" >}}) + +- [RedisJSON 2.4]({{< relref "/operate/oss_and_stack/stack-with-enterprise/release-notes/redisjson/redisjson-2.4-release-notes.md" >}}) + +- [RedisTimeSeries 1.8]({{< relref "/operate/oss_and_stack/stack-with-enterprise/release-notes/redistimeseries/redistimeseries-1.8-release-notes.md" >}}) + +- [RedisBloom 2.4]({{< relref "/operate/oss_and_stack/stack-with-enterprise/release-notes/redisbloom/redisbloom-2.4-release-notes.md" >}}) + +- [RedisGraph v2.10]({{< relref "/operate/oss_and_stack/stack-with-enterprise/release-notes/redisgraph/redisgraph-2.10-release-notes.md" >}}): RedisGraph end-of-life has been announced and will be removed in a future release. See the [RedisGraph end-of-life announcement](https://redis.io/blog/redisgraph-eol/) for more details. + +### Resolved issues + +- RS126364: Fixed a memory leak in the DMC proxy process. 
+ +## Version changes + +### Product lifecycle updates + +#### End-of-life policy extension + +The end-of-life policy for Redis Enterprise Software versions 6.2 and later has been extended to 24 months after the formal release of the subsequent major version. For the updated end-of-life schedule, see the [Redis Enterprise Software product lifecycle]({{}}). + +#### Supported upgrade paths + +Redis Enterprise Software versions 6.2.4 and 6.2.8 do not support direct upgrades beyond version 7.4.x. Versions 6.2.10, 6.2.12, and 6.2.18 are part of the [upgrade path]({{}}). To upgrade from 6.2.4 or 6.2.8 to versions later than 7.4.x, an intermediate upgrade is required. + +The next major Redis Enterprise Software release will still bundle Redis database version 6.2 and allow database upgrades from Redis database version 6.2 to 7.x. + +See the [Redis Enterprise Software product lifecycle]({{}}) for more information about release numbers. + +### Deprecations + +#### Legacy UI deprecation + +The legacy UI is deprecated in favor of the new Cluster Manager UI and will be removed in a future release. + +#### Redis 6.0 database deprecation + +Redis database version 6.0 is deprecated as of Redis Enterprise Software version 7.4.2 and will be removed in a future release. + +To prepare for the future removal of Redis 6.0: + +- For Redis Enterprise 6.2.* clusters, upgrade Redis 6.0 databases to Redis 6.2. See the [Redis 6.2 release notes](https://raw.githubusercontent.com/redis/redis/6.2/00-RELEASENOTES) for the list of changes. + +- For Redis Enterprise 7.2.4 and 7.4.x clusters, upgrade Redis 6.0 databases to Redis 7.2. Before you upgrade your databases, see the list of [Redis 7.2 breaking changes]({{< relref "/operate/rs/release-notes/rs-7-2-4-releases/rs-7-2-4-52#redis-72-breaking-changes" >}}) and update any applications that connect to your database to handle these changes. 
+ +#### End of triggers and functions preview + +The [triggers and functions]({{}}) (RedisGears) preview has been discontinued. + +- Commands such as `TFCALL`, `TFCALLASYNC`, and `TFUNCTION` will be deprecated and will return error messages. + +- Any JavaScript functions stored in Redis will be removed. + +- JavaScript-based triggers will be blocked. + +- Lua functions and scripts will not be affected. + +If your database currently uses triggers and functions, you need to: + +1. Adjust your applications to accommodate these changes. + +1. Delete all triggers and functions libraries from your existing database: + + 1. Run `TFUNCTION LIST`. + + 1. Copy all library names. + + 1. Run `TFUNCTION DELETE` for each library in the list. + + If any triggers and functions libraries remain in the database, the RDB snapshot won't load on a cluster without RedisGears. + +1. Migrate your database to a new database without the RedisGears module. + +#### RedisGraph end of life + +Redis has announced the end of life for RedisGraph. Redis will continue to support all RedisGraph customers, including releasing patch versions until January 31, 2025. + +See the [RedisGraph end-of-life announcement](https://redis.com/blog/redisgraph-eol/) for more details. + +### Upcoming changes + +#### Default image change for Redis Enterprise Software containers + +Starting with the next major version, Redis Enterprise Software containers with the image tag `x.y.z-build` will be based on RHEL instead of Ubuntu. + +This change will only affect you if you use containers outside the official [Redis Enterprise for Kubernetes]({{}}) product and use Ubuntu-specific commands. + +To use Ubuntu-based images after this change, you can specify the operating system suffix in the image tag. For example, use the image tag `7.4.2-216.focal` instead of `7.4.2-216`. + +### Supported platforms + +The following table provides a snapshot of supported platforms as of this Redis Software release. 
See the [supported platforms reference]({{< relref "/operate/rs/references/supported-platforms" >}}) for more details about operating system compatibility.
+
+✅ Supported – The platform is supported for this version of Redis Software and Redis Stack modules.
+
+:warning: Deprecation warning – The platform is still supported for this version of Redis Software, but support will be removed in a future release.
+
+| Redis Software<br>major versions | 7.8 | 7.4 | 7.2 | 6.4 | 6.2 |
+|---------------------------------|:-----:|:-----:|:-----:|:-----:|:-----:|
+| **Release date** | Nov 2024 | Feb 2024 | Aug 2023 | Feb 2023 | Aug 2021 |
+| [**End-of-life date**]({{< relref "/operate/rs/installing-upgrading/product-lifecycle#endoflife-schedule" >}}) | Determined after<br>next major release | Nov 2026 | Feb 2026 | Aug 2025 | Feb 2025 |
+| **Platforms** | | | | | |
+| RHEL 9 &<br>compatible distros[1](#table-note-1) | ✅ | ✅ | – | – | – |
+| RHEL 9<br>FIPS mode[5](#table-note-5) | ✅ | – | – | – | – |
+| RHEL 8 &<br>compatible distros[1](#table-note-1) | ✅ | ✅ | ✅ | ✅ | ✅ |
+| RHEL 7 &<br>compatible distros[1](#table-note-1) | – | – | :warning: | ✅ | ✅ |
+| Ubuntu 20.04[2](#table-note-2) | ✅ | ✅ | ✅ | ✅ | – |
+| Ubuntu 18.04[2](#table-note-2) | – | :warning: | :warning: | ✅ | ✅ |
+| Ubuntu 16.04[2](#table-note-2) | – | – | :warning: | ✅ | ✅ |
+| Amazon Linux 2 | ✅ | ✅ | ✅ | ✅ | – |
+| Amazon Linux 1 | – | – | ✅ | ✅ | ✅ |
+| Kubernetes[3](#table-note-3) | ✅ | ✅ | ✅ | ✅ | ✅ |
+| Docker[4](#table-note-4) | ✅ | ✅ | ✅ | ✅ | ✅ |
+
+1. The RHEL-compatible distributions CentOS, CentOS Stream, Alma, and Rocky are supported if they have full RHEL compatibility. Oracle Linux running the Red Hat Compatible Kernel (RHCK) is supported, but the Unbreakable Enterprise Kernel (UEK) is not supported.
+
+2. The server version of Ubuntu is recommended for production installations. The desktop version is only recommended for development deployments.
+
+3. See the [Redis Enterprise for Kubernetes documentation]({{< relref "/operate/kubernetes/reference/supported_k8s_distributions" >}}) for details about support per version and Kubernetes distribution.
+
+4. [Docker images]({{< relref "/operate/rs/installing-upgrading/quickstarts/docker-quickstart" >}}) of Redis Software are certified for development and testing only.
+
+5. Supported only if [FIPS was enabled during RHEL installation](https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/9/html/security_hardening/switching-rhel-to-fips-mode_security-hardening#proc_installing-the-system-with-fips-mode-enabled_switching-rhel-to-fips-mode) to ensure FIPS compliance.
+ +## Downloads + +The following table shows the SHA256 checksums for the available packages: + +| Package | SHA256 checksum (7.4.6-188 January release) | +|---------|---------------------------------------| +| Ubuntu 18 | 452446126776a93a6ac943f6b9ed0a274a2b48eb18d5ec057d15459e6148caa2 | +| Ubuntu 20 | 91c245d39007fc1c5ad86dc991286ce30d837289f541518bfee5249ccb3864bb | +| Red Hat Enterprise Linux (RHEL) 8 | 995bfa4aad1f1ab4611fbe83d1d3cbb2ddc45b9fa6d53900267385e6e5c4d162 | +| Red Hat Enterprise Linux (RHEL) 9 | b4fc25f6a812fe05ab990049f7f6f8a22932c5d8c507c475455802009ed5acca | +| Amazon Linux 2 | 042b016fd9fac8fac95d51e9fbfece6f4ab5e705146e7423ea23b68ce872c76f | + +## Known issues + +- RS131972: Creating an ACL that contains a line break in the Cluster Manager UI can cause shard migration to fail due to ACL errors. + +- RS61676: Full chain certificate update fails if any certificate in the chain does not have a Common Name (CN). + +- RS119958: The `debuginfo` script fails with the error `/bin/tar: Argument list too long` if there are too many RocksDB log files. This issue only affects clusters with Auto Tiering. + +## Known limitations + +#### New Cluster Manager UI limitations + +The following legacy UI features are not yet available in the new Cluster Manager UI: + +- Remove a node. + + Use the REST API or legacy UI instead. See [Remove a cluster node]({{< relref "/operate/rs/clusters/remove-node" >}}) for instructions. + +- Purge an Active-Active instance. + + Use [`crdb-cli crdb purge-instance`]({{< relref "/operate/rs/references/cli-utilities/crdb-cli/crdb/purge-instance" >}}) instead. + +- Search and export the log. + +#### OpenSSL compatibility issue for 7.4.2 modules on Amazon Linux 2 + +Due to an OpenSSL 1.1 compatibility issue between modules and clusters, Redis Enterprise Software version 7.4.2-54 is not fully supported on Amazon Linux 2 clusters with databases that use the following modules: RedisGears, RediSearch, or RedisTimeSeries. 
+ +This issue will be fixed in a future maintenance release. + +#### RedisGraph prevents upgrade to RHEL 9 + +You cannot upgrade from a prior RHEL version to RHEL 9 if the Redis Enterprise cluster contains a RedisGraph module, even if unused by any database. The [RedisGraph module has reached End-of-Life](https://redis.com/blog/redisgraph-eol/) and is completely unavailable in RHEL 9. + +## Security + +#### Open source Redis security fixes compatibility + +As part of Redis's commitment to security, Redis Enterprise Software implements the latest [security fixes](https://github.com/redis/redis/releases) available with [open source Redis](https://github.com/redis/redis). Redis Enterprise has already included the fixes for the relevant CVEs. + +Some CVEs announced for open source Redis do not affect Redis Enterprise due to different or additional functionality available in Redis Enterprise that is not available in open source Redis. + +Redis Enterprise 7.4.6-188 supports open source Redis 7.2, 6.2, and 6.0. Below is the list of open source Redis CVEs fixed by version. + +Redis 7.2.x: + +- (CVE-2024-31449) An authenticated user may use a specially crafted Lua script to trigger a stack buffer overflow in the bit library, which may potentially lead to remote code execution. + +- (CVE-2024-31228) An authenticated user can trigger a denial-of-service by using specially crafted, long string match patterns on supported commands such as `KEYS`, `SCAN`, `PSUBSCRIBE`, `FUNCTION LIST`, `COMMAND LIST`, and ACL definitions. Matching of extremely long patterns may result in unbounded recursion, leading to stack overflow and process crashes. + +- (CVE-2023-41056) In some cases, Redis may incorrectly handle resizing of memory buffers, which can result in incorrect accounting of buffer sizes and lead to heap overflow and potential remote code execution. 
+ +- (CVE-2023-41053) Redis does not correctly identify keys accessed by `SORT_RO` and, as a result, may grant users executing this command access to keys that are not explicitly authorized by the ACL configuration. (Redis 7.2.1) + +Redis 7.0.x: + +- (CVE-2024-31449) An authenticated user may use a specially crafted Lua script to trigger a stack buffer overflow in the bit library, which may potentially lead to remote code execution. + +- (CVE-2024-31228) An authenticated user can trigger a denial-of-service by using specially crafted, long string match patterns on supported commands such as `KEYS`, `SCAN`, `PSUBSCRIBE`, `FUNCTION LIST`, `COMMAND LIST`, and ACL definitions. Matching of extremely long patterns may result in unbounded recursion, leading to stack overflow and process crashes. + +- (CVE-2023-41056) In some cases, Redis may incorrectly handle resizing of memory buffers, which can result in incorrect accounting of buffer sizes and lead to heap overflow and potential remote code execution. + +- (CVE-2023-41053) Redis does not correctly identify keys accessed by `SORT_RO` and, as a result, may grant users executing this command access to keys that are not explicitly authorized by the ACL configuration. (Redis 7.0.13) + +- (CVE-2023-36824) Extracting key names from a command and a list of arguments may, in some cases, trigger a heap overflow and result in reading random heap memory, heap corruption, and potentially remote code execution. Specifically: using `COMMAND GETKEYS*` and validation of key names in ACL rules. (Redis 7.0.12) + +- (CVE-2023-28856) Authenticated users can use the `HINCRBYFLOAT` command to create an invalid hash field that will crash Redis on access. (Redis 7.0.11) + +- (CVE-2023-28425) Specially crafted `MSETNX` commands can lead to assertion and denial-of-service. 
(Redis 7.0.10) + +- (CVE-2023-25155) Specially crafted `SRANDMEMBER`, `ZRANDMEMBER`, and `HRANDFIELD` commands can trigger an integer overflow, resulting in a runtime assertion and termination of the Redis server process. (Redis 7.0.9) + +- (CVE-2023-22458) Integer overflow in the Redis `HRANDFIELD` and `ZRANDMEMBER` commands can lead to denial-of-service. (Redis 7.0.8) + +- (CVE-2022-36021) String matching commands (like `SCAN` or `KEYS`) with a specially crafted pattern to trigger a denial-of-service attack on Redis can cause it to hang and consume 100% CPU time. (Redis 7.0.9) + +- (CVE-2022-35977) Integer overflow in the Redis `SETRANGE` and `SORT`/`SORT_RO` commands can drive Redis to OOM panic. (Redis 7.0.8) + +- (CVE-2022-35951) Executing an `XAUTOCLAIM` command on a stream key in a specific state, with a specially crafted `COUNT` argument, may cause an integer overflow, a subsequent heap overflow, and potentially lead to remote code execution. The problem affects Redis versions 7.0.0 or newer. (Redis 7.0.5) + +- (CVE-2022-31144) A specially crafted `XAUTOCLAIM` command on a stream key in a specific state may result in heap overflow and potentially remote code execution. The problem affects Redis versions 7.0.0 or newer. (Redis 7.0.4) + +- (CVE-2022-24834) A specially crafted Lua script executing in Redis can trigger a heap overflow in the cjson and cmsgpack libraries, and result in heap corruption and potentially remote code execution. The problem exists in all versions of Redis with Lua scripting support, starting from 2.6, and affects only authenticated and authorized users. (Redis 7.0.12) + +- (CVE-2022-24736) An attacker attempting to load a specially crafted Lua script can cause NULL pointer dereference which will result in a crash of the `redis-server` process. This issue affects all versions of Redis. 
(Redis 7.0.0) + +- (CVE-2022-24735) By exploiting weaknesses in the Lua script execution environment, an attacker with access to Redis can inject Lua code that will execute with the (potentially higher) privileges of another Redis user. (Redis 7.0.0) + +Redis 6.2.x: + +- (CVE-2024-31449) An authenticated user may use a specially crafted Lua script to trigger a stack buffer overflow in the bit library, which may potentially lead to remote code execution. + +- (CVE-2024-31228) An authenticated user can trigger a denial-of-service by using specially crafted, long string match patterns on supported commands such as `KEYS`, `SCAN`, `PSUBSCRIBE`, `FUNCTION LIST`, `COMMAND LIST`, and ACL definitions. Matching of extremely long patterns may result in unbounded recursion, leading to stack overflow and process crashes. + +- (CVE-2023-28856) Authenticated users can use the `HINCRBYFLOAT` command to create an invalid hash field that will crash Redis on access. (Redis 6.2.12) + +- (CVE-2023-25155) Specially crafted `SRANDMEMBER`, `ZRANDMEMBER`, and `HRANDFIELD` commands can trigger an integer overflow, resulting in a runtime assertion and termination of the Redis server process. (Redis 6.2.11) + +- (CVE-2023-22458) Integer overflow in the Redis `HRANDFIELD` and `ZRANDMEMBER` commands can lead to denial-of-service. (Redis 6.2.9) + +- (CVE-2022-36021) String matching commands (like `SCAN` or `KEYS`) with a specially crafted pattern to trigger a denial-of-service attack on Redis can cause it to hang and consume 100% CPU time. (Redis 6.2.11) + +- (CVE-2022-35977) Integer overflow in the Redis `SETRANGE` and `SORT`/`SORT_RO` commands can drive Redis to OOM panic. (Redis 6.2.9) + +- (CVE-2022-24834) A specially crafted Lua script executing in Redis can trigger a heap overflow in the cjson and cmsgpack libraries, and result in heap corruption and potentially remote code execution. 
The problem exists in all versions of Redis with Lua scripting support, starting from 2.6, and affects only authenticated and authorized users. (Redis 6.2.13) + +- (CVE-2022-24736) An attacker attempting to load a specially crafted Lua script can cause NULL pointer dereference which will result in a crash of the `redis-server` process. This issue affects all versions of Redis. (Redis 6.2.7) + +- (CVE-2022-24735) By exploiting weaknesses in the Lua script execution environment, an attacker with access to Redis can inject Lua code that will execute with the (potentially higher) privileges of another Redis user. (Redis 6.2.7) + +- (CVE-2021-41099) Integer to heap buffer overflow handling certain string commands and network payloads, when `proto-max-bulk-len` is manually configured to a non-default, very large value. (Redis 6.2.6) + +- (CVE-2021-32762) Integer to heap buffer overflow issue in `redis-cli` and `redis-sentinel` parsing large multi-bulk replies on some older and less common platforms. (Redis 6.2.6) + +- (CVE-2021-32761) An integer overflow bug in Redis version 2.2 or newer can be exploited using the `BITFIELD` command to corrupt the heap and potentially result in remote code execution. (Redis 6.2.5) + +- (CVE-2021-32687) Integer to heap buffer overflow with intsets, when `set-max-intset-entries` is manually configured to a non-default, very large value. (Redis 6.2.6) + +- (CVE-2021-32675) Denial-of-service when processing RESP request payloads with a large number of elements on many connections. (Redis 6.2.6) + +- (CVE-2021-32672) Random heap reading issue with Lua Debugger. (Redis 6.2.6) + +- (CVE-2021-32628) Integer to heap buffer overflow handling ziplist-encoded data types, when configuring a large, non-default value for `hash-max-ziplist-entries`, `hash-max-ziplist-value`, `zset-max-ziplist-entries` or `zset-max-ziplist-value`. 
(Redis 6.2.6) + +- (CVE-2021-32627) Integer to heap buffer overflow issue with streams, when configuring a non-default, large value for `proto-max-bulk-len` and `client-query-buffer-limit`. (Redis 6.2.6) + +- (CVE-2021-32626) Specially crafted Lua scripts may result in a heap buffer overflow. (Redis 6.2.6) + +- (CVE-2021-32625) An integer overflow bug in Redis version 6.0 or newer can be exploited using the `STRALGO LCS` command to corrupt the heap and potentially result in remote code execution. This is a result of an incomplete fix by CVE-2021-29477. (Redis 6.2.4) + +- (CVE-2021-29478) An integer overflow bug in Redis 6.2 could be exploited to corrupt the heap and potentially result in remote code execution. The vulnerability involves changing the default `set-max-intset-entries` configuration value, creating a large set key that consists of integer values and using the `COPY` command to duplicate it. The integer overflow bug exists in all versions of Redis starting with 2.6, where it could result in a corrupted RDB or DUMP payload, but not exploited through `COPY` (which did not exist before 6.2). (Redis 6.2.3) + +- (CVE-2021-29477) An integer overflow bug in Redis version 6.0 or newer could be exploited using the `STRALGO LCS` command to corrupt the heap and potentially result in remote code execution. The integer overflow bug exists in all versions of Redis starting with 6.0. (Redis 6.2.3) + +Redis 6.0.x: + +- (CVE-2022-24834) A specially crafted Lua script executing in Redis can trigger a heap overflow in the cjson and cmsgpack libraries, and result in heap corruption and potentially remote code execution. The problem exists in all versions of Redis with Lua scripting support, starting from 2.6, and affects only authenticated and authorized users. (Redis 6.0.20) + +- (CVE-2023-28856) Authenticated users can use the `HINCRBYFLOAT` command to create an invalid hash field that will crash Redis on access. 
(Redis 6.0.19) + +- (CVE-2023-25155) Specially crafted `SRANDMEMBER`, `ZRANDMEMBER`, and `HRANDFIELD` commands can trigger an integer overflow, resulting in a runtime assertion and termination of the Redis server process. (Redis 6.0.18) + +- (CVE-2022-36021) String matching commands (like `SCAN` or `KEYS`) with a specially crafted pattern to trigger a denial-of-service attack on Redis can cause it to hang and consume 100% CPU time. (Redis 6.0.18) + +- (CVE-2022-35977) Integer overflow in the Redis `SETRANGE` and `SORT`/`SORT_RO` commands can drive Redis to OOM panic. (Redis 6.0.17) diff --git a/data/components/lettuce_async.json b/data/components/lettuce_async.json index 6d1494383..78aacc4e7 100644 --- a/data/components/lettuce_async.json +++ b/data/components/lettuce_async.json @@ -9,7 +9,7 @@ }, "examples": { "git_uri": "https://github.com/redis/lettuce", - "dev_branch": "doctests", + "dev_branch": "main", "path": "src/test/java/io/redis/examples/async", "pattern": "*.java" } diff --git a/data/components/lettuce_reactive.json b/data/components/lettuce_reactive.json index 07e33adb5..2ff9abf9c 100644 --- a/data/components/lettuce_reactive.json +++ b/data/components/lettuce_reactive.json @@ -9,7 +9,7 @@ }, "examples": { "git_uri": "https://github.com/redis/lettuce", - "dev_branch": "doctests", + "dev_branch": "main", "path": "src/test/java/io/redis/examples/reactive", "pattern": "*.java" } diff --git a/layouts/_default/single.html b/layouts/_default/single.html index 22d35e8f0..00d44a5b0 100644 --- a/layouts/_default/single.html +++ b/layouts/_default/single.html @@ -8,4 +8,4 @@

{{ .Title }}

-{{ end }} \ No newline at end of file +{{ end }} diff --git a/layouts/home.html b/layouts/home.html index 2c528d23b..4633210ba 100644 --- a/layouts/home.html +++ b/layouts/home.html @@ -72,111 +72,107 @@

{{ .Title }}

-

Learn

+

Get Started

{{ partial "docs-section.html" (dict - "Title" "Develop with Redis" - "Description" "Learn how to use the Redis in-memory data store." - "ButtonLink" "./develop" - "ButtonLabel" "Learn more" - "LinksLeftTitle" "Featured content" + "Title" "Quickly set up a Redis cache, primary, vector, or custom database" + "Description" "Set up a Free Redis-managed database on AWS, GCP, or Azure" + "ButtonLink" "./operate/rc/rc-quickstart/" + "ButtonLabel" "Redis Cloud quick start" "LinksLeft" (slice - (dict "Text" "Vector search" "URL" "./develop/interact/search-and-query/query/vector-search") - (dict "Text" "Data structure store" "URL" "./develop/get-started/data-store/") - (dict "Text" "Document database" "URL" "./develop/get-started/document-database/") + (dict "Text" "Get started with Redis Community Edition" "URL" "./get-started/") ) ) }} {{ partial "docs-section.html" (dict - "Title" "Libraries and tools" - "Description" "Learn about libraries and tools available for Redis." - "ButtonLink" "./integrate" - "ButtonLabel" "Browse tools" - "LinksLeftTitle" "Featured libraries" - "LinksLeft" (slice - (dict "Text" "RedisVL" "URL" "./integrate/redisvl/") - (dict "Text" "RedisOM for Python" "URL" "./integrate/redisom-for-python") - (dict "Text" "Ingest" "URL" "./integrate/redis-data-integration/") - (dict "Text" "Write-behind" "URL" "./integrate/write-behind/quickstart/write-behind-guide/") - ) - ) }} + "Title" "Build GenAI apps with Redis" + "Description" "Get started using Redis for retrieval augmented generation (RAG)" + "ButtonLink" "./develop/get-started/rag/" + "ButtonLabel" "Redis with RAG" + "LinksLeft" (slice + (dict "Text" "Redis for AI docs and code examples" "URL" "./develop/ai/") + ) + ) }} {{ partial "docs-section.html" (dict - "Title" "Redis Data Integration" - "Description" "Redis Data Integration keeps Redis in sync with the primary database in near real time." 
- "ButtonLink" "./integrate/redis-data-integration" - "ButtonLabel" "Learn more" - "LinksLeftTitle" "LEARN MORE" + "Title" "Optimize Redis for high-scale apps" + "Description" "Get the Redis Insight GUI on AWS EC2, Docker, Kubernetes, and desktop" + "ButtonLink" "./operate/redisinsight/install/" + "ButtonLabel" "Redis Insight install guide" "LinksLeft" (slice - (dict "Text" "Quick start" "URL" "./integrate/redis-data-integration/quick-start-guide") - (dict "Text" "Architecture" "URL" "./integrate/redis-data-integration/architecture") - (dict "Text" "Install" "URL" "./integrate/redis-data-integration/installation") - (dict "Text" "Reference" "URL" "./integrate/redis-data-integration/reference") + (dict "Text" "Performance and memory optimization user guide" "URL" "./develop/tools/insight/#database-analysis") ) ) }} {{ partial "docs-section.html" (dict - "Title" "Redis Insight" - "Description" "A cross-platform GUI for Redis, with focus on reducing memory usage and improving application performance." 
- "ButtonLink" "./develop/tools/insight/" - "ButtonLabel" "Learn more" - "LinksLeftTitle" "Learn more" + "Title" "Migrate data to or from Redis" + "Description" "Migrate data from files, data generators, relational databases, or snapshots" + "ButtonLink" "./integrate/riot/" + "ButtonLabel" "Redis input and output CLI tool" "LinksLeft" (slice - (dict "Text" "Install" "URL" "./operate/redisinsight/install/") - (dict "Text" "User guide" "URL" "./develop/tools/insight/") - (dict "Text" "Manage Streams in Redis Insight" "URL" "./develop/tools/insight/tutorials/insight-stream-consumer/") + (dict "Text" "Redis Input/Output Tool quick start" "URL" "./integrate/riot/quick-start/") ) ) }} + {{ partial "docs-section.html" (dict + "Title" "Connect with Redis client API libraries" + "Description" "Connect your application to a Redis database and try an example" + "ButtonLink" "./develop/clients/" + "ButtonLabel" "Redis client API library guides" + + "LinksLeft" (slice + (dict "Text" "Python" "URL" "./develop/clients/redis-py/") + (dict "Text" "C#/.NET" "URL" "./develop/clients/dotnet/") + (dict "Text" "JavaScript" "URL" "./develop/clients/nodejs/") + (dict "Text" "Java" "URL" "./develop/clients/jedis/") + (dict "Text" "Go" "URL" "./develop/clients/go/") + (dict "Text" "PHP" "URL" "./develop/clients/php/") + ) ) }}
-

Explore

+

Deploy

{{ partial "docs-section.html" (dict - "Title" "Redis and Stack" - "Description" "Redis Stack extends Redis with modern data models and processing engines." - "ButtonLink" "./operate/oss_and_stack/" - "ButtonLabel" "Read more" - "LinksLeftTitle" "Learn more" + "Title" "Control your Redis deployments on-premises and on cloud platforms" + "Description" "Robust support for hybrid and multi-AZ environments" + "ButtonLink" "./operate/rs/installing-upgrading/install/plan-deployment/" + "ButtonLabel" "Redis deployment planning" "LinksLeft" (slice - (dict "Text" "Install" "URL" "./operate/oss_and_stack/install/install-stack/") - (dict "Text" "Redis Query Engine" "URL" "./develop/interact/search-and-query/") - (dict "Text" "JSON" "URL" "./develop/data-types/json/") + (dict "Text" "Active-Active geo-distributed Redis" "URL" "./operate/rs/databases/active-active/") ) ) }} {{ partial "docs-section.html" (dict - "Title" "Redis Cloud" - "Description" "Deploy Redis Enterprise on Amazon Web Services, Google Cloud, or Microsoft Azure." - "ButtonLink" "./operate/rc" - "ButtonLabel" "Read more" - "LinksLeftTitle" "Learn more" + "Title" "Deploy on Kubernetes" + "Description" "Get started with the Redis Software for Kubernetes container image" + "ButtonLink" "./operate/kubernetes/deployment/quick-start/" + "ButtonLabel" "Kubernetes deployment guide" "LinksLeft" (slice - (dict "Text" "Quick start" "URL" "./operate/rc/rc-quickstart/") - (dict "Text" "Subscriptions" "URL" "./operate/rc/subscriptions/") - (dict "Text" "Databases" "URL" "./operate/rc/databases/") - (dict "Text" "Active-Active" "URL" "./operate/rc/databases/create-database/create-active-active-database/") + (dict "Text" "Deploy Redis Software for Kubernetes with OpenShift" "URL" "./operate/kubernetes/deployment/openshift/") ) ) }} {{ partial "docs-section.html" (dict - "Title" "Redis Enterprise Software" - "Description" "A self-managed data platform that unlocks the full potential of Redis at enterprise scale." 
- "ButtonLink" "./operate/rs" - "ButtonLabel" "Read more" - "LinksLeftTitle" "Learn more" + "Title" "Ingest and sync data to Redis" + "Description" "Sync Redis Software with live data from your disk-based databases" + "ButtonLink" "./integrate/redis-data-integration/quick-start-guide/" + "ButtonLabel" "Pipeline quick start" "LinksLeft" (slice - (dict "Text" "Quick start" "URL" "./operate/rs/installing-upgrading/quickstarts/redis-enterprise-software-quickstart/") - (dict "Text" "Clusters" "URL" "./operate/rs/clusters/") - (dict "Text" "Databases" "URL" "./operate/rs/databases/") - (dict "Text" "Networking" "URL" "./operate/rs/networking/") + (dict "Text" "Redis Data Integration feature overview" "URL" "./integrate/redis-data-integration/") ) ) }} {{ partial "docs-section.html" (dict - "Title" "Redis Enterprise for Kubernetes" - "Description" "Redis Enterprise deployed on containerized software platforms and integrated with our partners." - "ButtonLink" "./operate/kubernetes/" - "ButtonLabel" "Read more" - "LinksLeftTitle" "Learn more" - "LinksLeft" (slice - (dict "Text" "Architecture" "URL" "./operate/kubernetes/architecture/") - (dict "Text" "Deployments" "URL" "./operate/kubernetes/deployment/") - (dict "Text" "Clusters" "URL" "./operate/kubernetes/re-clusters/") - (dict "Text" "Databases" "URL" "./operate/kubernetes/re-databases/") + "Title" "Monitor Redis with Prometheus" + "Description" "Get started with the Redis Software integration for Prometheus and Grafana" + "ButtonLink" "./integrate/prometheus-with-redis-enterprise/" + "ButtonLabel" "Prometheus and Grafana with Redis" + "LinksLeft" (slice + (dict "Text" "Prometheus and Grafana with Redis Cloud" "URL" "./integrate/prometheus-with-redis-cloud/") ) ) }} + {{ partial "docs-section.html" (dict + "Title" "Client tools to connect to a Redis server" + "Description" "You can use the CLI, Redis Insight, or the Redis VSCode extension to connect to Redis" + "ButtonLink" "./develop/tools/" + "ButtonLabel" "Client 
tools overview guide" + "LinksLeft" (slice + (dict "Text" "Redis CLI" "URL" "./develop/tools/cli/") + (dict "Text" "Redis Insight" "URL" "./develop/tools/insight/") + (dict "Text" "Redis for VSCode" "URL" "./develop/tools/redis-for-vscode/") + ) ) }} +
diff --git a/layouts/operate/list.html b/layouts/operate/list.html index d3b03e723..66e01fe24 100644 --- a/layouts/operate/list.html +++ b/layouts/operate/list.html @@ -15,8 +15,53 @@

{{ .Title }}

- {{ with .Params.description }}

{{ . | markdownify }}

{{ end }} - + {{ with .Params.description }}

{{ . | markdownify }}

{{ end }} + {{ if .Params.categories }} +
+ + + + {{ $visibleCategories := slice }} + {{ range .Params.categories }} + {{ if not (in (slice "docs" "operate" "integrate" "develop") .) }} + {{ $visibleCategories = $visibleCategories | append (slice .) }} + {{ end }} + {{ end }} + {{ range $index, $element := $visibleCategories }} + {{ $displayName := $element }} + {{ $color := "#ffffff" }} + {{ if eq $element "rs" }} + {{ $displayName = "Redis Enterprise Software" }} + {{ $color = "#DCFF1E" }} + {{ else if eq $element "rc" }} + {{ $displayName = "Redis Cloud" }} + {{ $color = "#80DBFF" }} + {{ else if eq $element "kubernetes" }} + {{ $displayName = "Redis Enterprise for Kubernetes" }} + {{ $color = "#8A99A0" }} + {{ else if eq $element "oss" }} + {{ $displayName = "Redis Community Edition" }} + {{ $color = "#C795E3" }} + {{ else if eq $element "stack" }} + {{ $displayName = "Redis Stack" }} + {{ $color = "#49859C" }} + {{ else if eq $element "redisinsight" }} + {{ $displayName = "Redis Insight" }} + {{ $color = "#FD4439" }} + {{ end }} + + {{ end }} + + + + + +
+ {{ $displayName }} +
+
+{{ end }} + {{ if .Params.bannerText }} diff --git a/layouts/operate/single.html b/layouts/operate/single.html index 21f59282a..2ad476836 100644 --- a/layouts/operate/single.html +++ b/layouts/operate/single.html @@ -13,8 +13,58 @@

{{ partial "icons/logo-stack.html" (dict "context" . "class" "stack-logo-inline") }} {{ end }} {{ .Title }}

- {{ with .Params.description }}

{{ . | markdownify }}

{{ end }} - + {{ with .Params.description }}

{{ . | markdownify }}

{{ end }} + {{ if .Params.categories }} +
+ + + + {{ $visibleCategories := slice }} + {{ range .Params.categories }} + {{ if not (in (slice "docs" "operate" "integrate" "develop") .) }} + {{ $visibleCategories = $visibleCategories | append (slice .) }} + {{ end }} + {{ end }} + {{ range $index, $element := $visibleCategories }} + {{ $displayName := $element }} + {{ $color := "#ffffff" }} + {{ if eq $element "rs" }} + {{ $displayName = "Redis Enterprise Software" }} + {{ $color = "#DCFF1E" }} + {{ else if eq $element "rc" }} + {{ $displayName = "Redis Cloud" }} + {{ $color = "#80DBFF" }} + {{ else if eq $element "kubernetes" }} + {{ $displayName = "Redis Enterprise for Kubernetes" }} + {{ $color = "#8A99A0" }} + {{ else if eq $element "oss" }} + {{ $displayName = "Redis Community Edition" }} + {{ $color = "#C795E3" }} + {{ else if eq $element "stack" }} + {{ $displayName = "Redis Stack" }} + {{ $color = "#49859C" }} + {{ else if eq $element "redisinsight" }} + {{ $displayName = "Redis Insight" }} + {{ $color = "#FD4439" }} + {{ end }} + + {{ end }} + + + + + +
+ {{ $displayName }} +
+
+{{ end }} + + + + + + {{ if .Params.bannerText }} diff --git a/static/images/rc/database-details-configuration-tab-scalability-flexible.png b/static/images/rc/database-details-configuration-tab-scalability-flexible.png index c68440153..b711e337d 100644 Binary files a/static/images/rc/database-details-configuration-tab-scalability-flexible.png and b/static/images/rc/database-details-configuration-tab-scalability-flexible.png differ diff --git a/static/images/rc/database-new-flexible-scalability.png b/static/images/rc/database-new-flexible-scalability.png index 659590dde..d1d83f3f2 100644 Binary files a/static/images/rc/database-new-flexible-scalability.png and b/static/images/rc/database-new-flexible-scalability.png differ