From 4226e8257a25eb3f7a53e1e69695e12a8f80f35c Mon Sep 17 00:00:00 2001 From: James Ma <75407126+James96315@users.noreply.github.com> Date: Fri, 23 Aug 2024 14:22:15 +0800 Subject: [PATCH] Update to version v2.2.2 (#306) * Update to version v2.1.1 * Update version to v2.2.1 * Update version to v2.2.2 --------- Co-authored-by: James Ma --- CHANGELOG.md | 13 +++ .../api/app_log_ingestion/flb/flb_builder.py | 6 + .../flb/flb_template/pipeline.conf | 9 +- .../api/app_log_ingestion/test/test_flb.py | 6 + .../pipeline/log-processor/idx/idx_svc.py | 11 +- .../log-processor/idx/opensearch_client.py | 10 +- .../pipeline/log-processor/lambda_function.py | 4 +- .../lib/api/app-log-ingestion-stack.ts | 7 +- .../lib/api/fluent-bit-config-stack.ts | 7 +- source/constructs/lib/api/instance-stack.ts | 2 +- .../pipeline/common/opensearch-init-stack.ts | 5 +- source/constructs/package.json | 14 +-- source/portal/package.json | 2 +- source/portal/public/locales/en/cluster.json | 24 ++-- source/portal/public/locales/en/ekslog.json | 2 +- source/portal/public/locales/en/resource.json | 1 + source/portal/public/locales/zh/cluster.json | 22 ++-- source/portal/public/locales/zh/ekslog.json | 2 +- source/portal/public/locales/zh/resource.json | 1 + source/portal/src/assets/js/const.ts | 9 +- source/portal/src/assets/js/request.ts | 10 +- .../portal/src/components/layout/header.tsx | 3 +- .../src/pages/clusters/domain/DomainList.tsx | 44 ++++--- .../create/cloudfront/comps/SourceType.tsx | 18 ++- .../grafana/importGrafana/UpdateGrafana.tsx | 2 +- .../importGrafana/steps/ConfigServer.tsx | 4 +- .../pages/resources/common/InstanceTable.tsx | 8 +- .../pages/resources/common/LogConfigComp.tsx | 10 +- .../instanceGroup/comps/DetailEC2.tsx | 108 ++++++++++++------ source/portal/src/reducer/createLogConfig.ts | 26 +++-- source/portal/src/router/AmplifyAppRouter.tsx | 5 +- 31 files changed, 265 insertions(+), 130 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6502293c..1768db68 100755 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,19 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [2.2.2] - 2024-08-23 + +### Changed + +- Support editing the auto-generated regular expression for Nginx and Apache logs in the Log Config. #301 +- Adjusted logs that report errors in the log-processor function from info level to error level. #305 + +### Fixed + +- Fixed a failure to retrieve instance status on the Instance Group details page when the group contains too many instances. #298 +- Removed the redundant sign-in alert dialog shown when the user session expires. #303 +- Fixed Grafana URL validation failing on URLs containing spaces when importing a Grafana server. 
#304 + ## [2.2.1] - 2024-07-22 ### Added diff --git a/source/constructs/lambda/api/app_log_ingestion/flb/flb_builder.py b/source/constructs/lambda/api/app_log_ingestion/flb/flb_builder.py index b8358563..8199cb2a 100644 --- a/source/constructs/lambda/api/app_log_ingestion/flb/flb_builder.py +++ b/source/constructs/lambda/api/app_log_ingestion/flb/flb_builder.py @@ -331,6 +331,11 @@ def _get_flb_params(self): WithDecryption=True, )["Parameter"]["Value"] + storage_total_limit_size = ssm_cli.get_parameter( + Name=f"/{stack_prefix}/FLB/storage_total_limit_size", + WithDecryption=True, + )["Parameter"]["Value"] + flb_params = { "log_level": log_level, "flush": flush, @@ -342,6 +347,7 @@ def _get_flb_params(self): "store_dir_limit_size": store_dir_limit_size, "storage_type": storage_type, "storage_pause_on_chunks_overlimit": storage_pause_on_chunks_overlimit, + "storage_total_limit_size": storage_total_limit_size, } return flb_params diff --git a/source/constructs/lambda/api/app_log_ingestion/flb/flb_template/pipeline.conf b/source/constructs/lambda/api/app_log_ingestion/flb/flb_template/pipeline.conf index 9b2d2c20..87fdca2c 100644 --- a/source/constructs/lambda/api/app_log_ingestion/flb/flb_template/pipeline.conf +++ b/source/constructs/lambda/api/app_log_ingestion/flb/flb_template/pipeline.conf @@ -178,6 +178,7 @@ {% endif %} {{placeholder}} Retry_Limit {{ssm_params.retry_limit}} {{placeholder}} Role_arn {{item.role_arn}} +{{placeholder}} storage.total_limit_size {{ssm_params.storage_total_limit_size}} {{placeholder}} {% elif item.output_name=='MSK' %} {{placeholder}}[OUTPUT] @@ -194,6 +195,7 @@ {{placeholder}} rdkafka.compression.type snappy {{placeholder}} rdkafka.security.protocol plaintext {{placeholder}} Retry_Limit {{ssm_params.retry_limit}} +{{placeholder}} storage.total_limit_size {{ssm_params.storage_total_limit_size}} {{placeholder}} {% elif item.output_name=='S3' %} {{placeholder}}[OUTPUT] @@ -204,7 +206,9 @@ {{placeholder}} region {{item.region_name}} {{placeholder}} total_file_size {{item.s3.max_file_size}}M {{placeholder}} upload_timeout {{item.s3.upload_timeout}}s -{{placeholder}} store_dir_limit_size {{ssm_params.store_dir_limit_size}} +{{placeholder}} #store_dir_limit_size {{ssm_params.store_dir_limit_size}} +{{placeholder}} #For Fluent Bit 2.0 and above, use the store_dir_limit_size parameter instead of storage.total_limit_size +{{placeholder}} storage.total_limit_size {{ssm_params.store_dir_limit_size}} {{placeholder}} use_put_object true {% if item.s3.compression_type | lower == "gzip" %} {{placeholder}} s3_key_format /{{item.s3.prefix}}/%Y-%m-%d-%H-%M-%S-$UUID.gz @@ -235,7 +239,7 @@ {{placeholder}} AWS_Auth On {{placeholder}} TLS On {{placeholder}} Suppress_Type_Name On -{{placeholder}} Buffer_Size 20M +{{placeholder}} Buffer_Size 50M {{placeholder}} #When enabled, generate _id for outgoing records. This prevents duplicate records when retrying. 
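{{placeholder}} #Together with "Write_Operation create" below, a retried batch re-sends the same generated _id values, so duplicates are rejected rather than indexed twice.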
{{placeholder}} Generate_ID On {{placeholder}} Logstash_Format Off @@ -244,6 +248,7 @@ {{placeholder}} Time_Key_Nanos Off {{placeholder}} Write_Operation create {{placeholder}} AWS_Role_ARN {{item.role_arn}} +{{placeholder}} storage.total_limit_size {{ssm_params.storage_total_limit_size}} {{placeholder}} {% endif %} {% endwith %} diff --git a/source/constructs/lambda/api/app_log_ingestion/test/test_flb.py b/source/constructs/lambda/api/app_log_ingestion/test/test_flb.py index aabce752..62198e42 100644 --- a/source/constructs/lambda/api/app_log_ingestion/test/test_flb.py +++ b/source/constructs/lambda/api/app_log_ingestion/test/test_flb.py @@ -76,6 +76,12 @@ def mock_ssm_context(): Type="String", ) + ssm_client.put_parameter( + Name=f"/{stack_prefix}/FLB/storage_total_limit_size", + Value="off", + Type="String", + ) + yield diff --git a/source/constructs/lambda/pipeline/log-processor/idx/idx_svc.py b/source/constructs/lambda/pipeline/log-processor/idx/idx_svc.py index 5451918f..06b6db2b 100644 --- a/source/constructs/lambda/pipeline/log-processor/idx/idx_svc.py +++ b/source/constructs/lambda/pipeline/log-processor/idx/idx_svc.py @@ -91,10 +91,13 @@ def run_func_with_retry( break logger.error("%s failed: %s", func_name, response.text) if response.status_code == 403 or response.status_code == 409: - logger.info( - "Please add access to OpenSearch for this Lambda and rerun this" - ) + logger.info("Please add access to OpenSearch for this Lambda") if response.status_code == 403: + logger.error( + "the last response code is %d, the last response content is %s", + response.status_code, + response.content, + ) self.map_backend_role() raise APIException( ErrorCode.UNKNOWN_ERROR, @@ -107,7 +110,7 @@ def run_func_with_retry( func_name, retry, ) - logger.info( + logger.error( "the last response code is %d, the last response content is %s", response.status_code, response.content, diff --git a/source/constructs/lambda/pipeline/log-processor/idx/opensearch_client.py b/source/constructs/lambda/pipeline/log-processor/idx/opensearch_client.py index b3f57fb7..7515c8c5 100644 --- a/source/constructs/lambda/pipeline/log-processor/idx/opensearch_client.py +++ b/source/constructs/lambda/pipeline/log-processor/idx/opensearch_client.py @@ -120,9 +120,9 @@ def create_ism_policy( response = requests.put( url, auth=self._awsauth, json=policy_doc, timeout=30 ) - logger.info("--> update ism policy response code %d", response.status_code) + logger.error("--> update ism policy response code %d", response.status_code) return response - logger.info( + logger.error( "the last response code is %d, the last response content is %s", response.status_code, response.content, @@ -348,6 +348,12 @@ def add_master_role(self, role_arn: str): status_code = resp["ResponseMetadata"]["HTTPStatusCode"] logger.info("Response status: %d", status_code) if status_code not in (200, 201): + logger.error( + "Add backend role %s to domain %s, response status: %d", + role_arn, + domain_name, + status_code, + ) raise APIException( ErrorCode.UNKNOWN_ERROR, "Failed to add backend role {role_arn} to domain {domain_name}", diff --git a/source/constructs/lambda/pipeline/log-processor/lambda_function.py b/source/constructs/lambda/pipeline/log-processor/lambda_function.py index 9118c0d6..86f392f2 100644 --- a/source/constructs/lambda/pipeline/log-processor/lambda_function.py +++ b/source/constructs/lambda/pipeline/log-processor/lambda_function.py @@ -71,7 +71,9 @@ def handle_sqs_retries(record): "This message has exceeded the maximum number of retries, 
verify that you can connect to OpenSearch and that the data types match the field types defined for the index", ) else: - raise Exception(f"Error processing SQS message: {record}") + raise Exception( + f"Error processing SQS message: {record}. The Lambda function has been invoked {approximate_receive_count} times; the message will be re-consumed and retried." + ) def change_sqs_message_visibility(event_record): diff --git a/source/constructs/lib/api/app-log-ingestion-stack.ts b/source/constructs/lib/api/app-log-ingestion-stack.ts index a310e176..eb6a4fb5 100644 --- a/source/constructs/lib/api/app-log-ingestion-stack.ts +++ b/source/constructs/lib/api/app-log-ingestion-stack.ts @@ -122,11 +122,6 @@ export class AppLogIngestionStack extends Construct { description: 'Download Fluent-Bit config file and reboot the Fluent-Bit', parameters: { - ARCHITECTURE: { - type: "String", - default: "", - description: "(Required) Machine Architecture" - }, INSTANCEID: { type: 'String', default: '', @@ -146,7 +141,7 @@ export class AppLogIngestionStack extends Construct { name: "updateFluentBitVersion", inputs: { runCommand: [ - `[ -e /opt/fluent-bit/bin/fluent-bit ] && [ -z \"$(/opt/fluent-bit/bin/fluent-bit -V | grep '${FluentBitVersion}')\" ] && curl -o /opt/fluent-bit{{ARCHITECTURE}}.tar.gz 'https://${flb_s3_addr}/clo/${process.env.VERSION}/aws-for-fluent-bit/fluent-bit{{ARCHITECTURE}}.tar.gz' && tar xzvf /opt/fluent-bit{{ARCHITECTURE}}.tar.gz -C /opt/ --exclude=fluent-bit/etc; echo 0` + `ARCHITECTURE=''; if [ \"$(uname -m)\" = \"aarch64\" ]; then ARCHITECTURE='-arm64'; fi; [ -e /opt/fluent-bit/bin/fluent-bit ] && [ -z \"$(/opt/fluent-bit/bin/fluent-bit -V | grep '${FluentBitVersion}')\" ] && curl -o /opt/fluent-bit$ARCHITECTURE.tar.gz https://${flb_s3_addr}/clo/${process.env.VERSION}/aws-for-fluent-bit/fluent-bit$ARCHITECTURE.tar.gz && tar xzvf /opt/fluent-bit$ARCHITECTURE.tar.gz -C /opt/ --exclude=fluent-bit/etc; echo 0` ] }, }, diff --git a/source/constructs/lib/api/fluent-bit-config-stack.ts b/source/constructs/lib/api/fluent-bit-config-stack.ts index afb50869..f77c9491 100644 --- a/source/constructs/lib/api/fluent-bit-config-stack.ts +++ b/source/constructs/lib/api/fluent-bit-config-stack.ts @@ -67,7 +67,7 @@ }); new ssm.StringParameter(this, "FlbBufferSizeParameter", { //NOSONAR parameterName: `/${props.stackPrefix}/FLB/buffer_size`, - description: "Set the buffer size for HTTP client when reading responses from Kubernetes API server. A value of 0 results in no limit, and the buffer will expand as-needed.", + description: "Set the buffer size for the HTTP client when reading responses from the Kubernetes API server, and the buffer size used to read data in the INPUT plugin. A value of 0 results in no limit, and the buffer will expand as needed.", stringValue: "0", }); new ssm.StringParameter(this, "FlbRetryLimitParameter", { //NOSONAR @@ -90,6 +90,11 @@ description: "This parameter specifies whether file storage is paused when the chunk limit is reached. Default is off", stringValue: "off", }); + new ssm.StringParameter(this, "FlbStorageTotalLimitSizeParameter", { //NOSONAR + parameterName: `/${props.stackPrefix}/FLB/storage_total_limit_size`, + description: "This parameter limits the maximum amount of chunk data in the filesystem for the current output logical destination. 
Default is 500M", + stringValue: "500M", + }); diff --git a/source/constructs/lib/api/instance-stack.ts b/source/constructs/lib/api/instance-stack.ts index faddd251..9647c5c2 100644 --- a/source/constructs/lib/api/instance-stack.ts +++ b/source/constructs/lib/api/instance-stack.ts @@ -374,7 +374,7 @@ export class InstanceStack extends Construct { runtime: lambda.Runtime.PYTHON_3_11, handler: 'lambda_function.lambda_handler', timeout: Duration.minutes(5), - memorySize: 4096, + memorySize: 1024, logFormat: "JSON", applicationLogLevel: "INFO", systemLogLevel: "WARN", diff --git a/source/constructs/lib/pipeline/common/opensearch-init-stack.ts b/source/constructs/lib/pipeline/common/opensearch-init-stack.ts index 8508d9e5..087b94cd 100644 --- a/source/constructs/lib/pipeline/common/opensearch-init-stack.ts +++ b/source/constructs/lib/pipeline/common/opensearch-init-stack.ts @@ -257,9 +257,6 @@ export class OpenSearchInitStack extends Construct { vpc: props.vpc, vpcSubnets: { subnetType: SubnetType.PRIVATE_WITH_EGRESS }, securityGroups: [props.securityGroup], - logFormat: "JSON", - applicationLogLevel: "ERROR", - systemLogLevel: "WARN", environment: Object.assign( { ENDPOINT: props.endpoint, @@ -320,7 +317,7 @@ export class OpenSearchInitStack extends Construct { SOURCE: props.source, WRITE_IDX_DATA: props.writeIdxData || "True", NO_BUFFER_ACCESS_ROLE_ARN: props.noBufferAccessRoleArn || "", - // EVENT_BRIDGE_RULE_NAME: props.eventBridgeRuleName || "", + POWERTOOLS_LOG_LEVEL: "ERROR", }, props.env), layers: [SharedPythonLayer.getInstance(this), pipeLayer], diff --git a/source/constructs/package.json b/source/constructs/package.json index b4300fe7..4ad20a3d 100644 --- a/source/constructs/package.json +++ b/source/constructs/package.json @@ -25,8 +25,8 @@ "@types/jest": "29.5.2", "@types/node": "20.9.0", "@types/prettier": "3.0.0", - "aws-cdk": "v2.149.0", - "aws-cdk-lib": "v2.149.0", + "aws-cdk": "2.151.0", + "aws-cdk-lib": "2.151.0", "constructs": "^10.3.0", "jest": "29.7.0", "ts-jest": "29.1.0", @@ -43,12 +43,12 @@ }, "dependencies": { "@aws-cdk/aws-appsync-alpha": "2.59.0-alpha.0", - "@aws-cdk/aws-glue-alpha": "v2.149.0-alpha.0", - "@aws-cdk/aws-kinesisfirehose-alpha": "v2.149.0-alpha.0", - "@aws-cdk/aws-kinesisfirehose-destinations-alpha": "v2.149.0-alpha.0", + "@aws-cdk/aws-glue-alpha": "2.151.0-alpha.0", + "@aws-cdk/aws-kinesisfirehose-alpha": "2.151.0-alpha.0", + "@aws-cdk/aws-kinesisfirehose-destinations-alpha": "2.151.0-alpha.0", "@aws-solutions-constructs/aws-cloudfront-s3": "2.47.0", - "aws-cdk": "v2.149.0", - "aws-cdk-lib": "v2.149.0", + "aws-cdk": "2.151.0", + "aws-cdk-lib": "2.151.0", "cdk-nag": "2.27.66", "constructs": "10.3.0", "source-map-support": "0.5.21" diff --git a/source/portal/package.json b/source/portal/package.json index a1d68aab..5133c0fe 100644 --- a/source/portal/package.json +++ b/source/portal/package.json @@ -24,7 +24,7 @@ "aws-amplify": "^5.3.12", "aws-appsync-auth-link": "^3.0.7", "aws-appsync-subscription-link": "^3.1.2", - "axios": "^1.6.0", + "axios": "^1.7.4", "classnames": "^2.3.2", "date-fns": "^2.30.0", "graphql-tag": "^2.12.6", diff --git a/source/portal/public/locales/en/cluster.json b/source/portal/public/locales/en/cluster.json index 8931b452..e7014f9d 100644 --- a/source/portal/public/locales/en/cluster.json +++ b/source/portal/public/locales/en/cluster.json @@ -191,22 +191,22 @@ "creation": "Network creation", "method": "Creation Method", "auto": "Automatic", - "autoDesc": "Create a dedicated VPC for log processing layer and public access layer to 
connect to the OpenSearch domain via VPC peering.", + "autoDesc": "Automatically create the VPC Peering Connection between the log processing layer and the OpenSearch VPC if needed.", "manual": "Manual", - "manualDesc": "Manually specify the log processing layer and public access layer network settings.", - "layerNetwork": "Log processing layer network", - "layerNetworkDesc": "Solution requires one Log Processing Subnet Group. The log processing functions will reside in the log processing subnet and use the associated Security Group.", + "manualDesc": "Manually specify the log processing layer and update the route tables and security group.", + "layerNetwork": "Log processing network", + "layerNetworkDesc": "The solution requires one log processing subnet group. The log processing functions will reside in the log processing subnet and use the associated security group.", "configTip": "Configure network", - "configTipDesc": "You have chosen a VPC other than the VPC of the selected OpenSearch domain. Please create VPC Peering, configure Route Table, and make sure the resources have access to the domain.", + "configTipDesc": "You have chosen a VPC other than the VPC of the selected Amazon OpenSearch domain. Please make sure the VPC can connect to the selected OpenSearch domain.", "vpc": "VPC", - "vpcDesc": "Select the VPC of the log processing layer. The VPC must be the same VPC as the cluster or connected via VPC Peering.", + "vpcDesc": "Select the Virtual Private Cloud (VPC) for the log processing layer. The chosen VPC should align with the VPC of the cluster, or it must be connected through VPC Peering Connection or Transit Gateway.", "chooseVPC": "Choose a VPC", - "logProcessSubnetGroup": "Log Processing Subnet Group", - "logProcessSubnetGroupDesc": "Select at least two subnets of the log processing layer. It is recommended to select private subnets.", - "chooseSubnet": "Choose Subnet", - "logProcessSG": "Log Processing Security Group", - "logProcessSGDesc": "Select a Security Group for the log processing layer. The Security Group must have access the OpenSearch domain.", - "chooseSG": "Choose a Security Group" + "logProcessSubnetGroup": "Log processing subnet group", + "logProcessSubnetGroupDesc": "Choose a minimum of two subnets for the log processing layer. These subnets must be configured as private, with routing set up to connect to a NAT gateway.", + "chooseSubnet": "Choose two subnets", + "logProcessSG": "Log processing security group", + "logProcessSGDesc": "Choose a security group for the log processing layer. This security group must be configured to allow access to the OpenSearch cluster. If the connection is through a NAT gateway, port 443 needs to be open.", + "chooseSG": "Choose a security group" } }, "imported": { diff --git a/source/portal/public/locales/en/ekslog.json b/source/portal/public/locales/en/ekslog.json index 9fe07509..eb864c94 100644 --- a/source/portal/public/locales/en/ekslog.json +++ b/source/portal/public/locales/en/ekslog.json @@ -61,7 +61,7 @@ }, "daemonsetGuide": { "alert": "", - "alertDesc": "Please ensure that all log source(s) you have created are in the Created state before proceeding with the following steps. All steps in this procedure are to deploy Fluent Bit agent as a DaemonSet. By default, the namespace \"logging\" is used", + "alertDesc": "Please ensure that all log source(s) you have created are in the Created state before proceeding with the following steps. All steps in this procedure deploy the Fluent Bit agent as a DaemonSet. 
By default, the namespace \"logging\" is used. If the YAML file changes, you need to restart the relevant FluentBit Pods.", "step1": "You will need to copy and paste all the content below into a file ~/fluent-bit-logging.yaml", "step2": "Use the kubectl command to deploy fluent-bit as a DaemonSet.", "createIngestion": "Please create a log source first" }, diff --git a/source/portal/public/locales/en/resource.json b/source/portal/public/locales/en/resource.json index 8a2cb587..b203dc7c 100644 --- a/source/portal/public/locales/en/resource.json +++ b/source/portal/public/locales/en/resource.json @@ -160,6 +160,7 @@ "apacheFormatError": "The specified format is invalid; a standard Apache configuration file usually begins with LogFormat.", "regexFormat": "Regular Expression", "regexFormatDesc": "Input the regular expression to parse the log.", + "regexFormatDescNginxApache": "Input the regular expression to parse the log. Please do not change the regular expression match group names (log fields).", "sampleParsing": "Sample log parsing", "parser": "Parser", "parserDesc": "Select the supported parser.", diff --git a/source/portal/public/locales/zh/cluster.json b/source/portal/public/locales/zh/cluster.json index 6e9147a2..7a350f2b 100644 --- a/source/portal/public/locales/zh/cluster.json +++ b/source/portal/public/locales/zh/cluster.json @@ -191,22 +191,22 @@ "creation": "网络创建", "method": "创建方式", "auto": "自动", - "autoDesc": "为日志处理层和公共访问层创建一个专用 VPC,以通过 VPC 对等连接到 OpenSearch 域。", + "autoDesc": "如果需要,自动创建日志处理层和 OpenSearch VPC 之间的 VPC 对等连接。", "manual": "手动", - "manualDesc": "手动指定日志处理层和公共访问层网络设置。", + "manualDesc": "手动指定日志处理层并更新路由表和安全组。", - "layerNetwork": "日志处理层网络", - "layerNetworkDesc": "解决方案需要一个日志处理子网组。日志处理功能将驻留在日志处理子网中并使用关联的安全组。", + "layerNetwork": "日志处理网络", + "layerNetworkDesc": "解决方案需要一个日志处理子网组。日志处理功能将在日志处理子网中运行,并使用相关联的安全组。", "configTip": "配置网络", - "configTipDesc": "您选择的VPC不是所选 OpenSearch 域的 VPC。请创建VPC对等连接,配置路由表,并确保资源可以访问域。", + "configTipDesc": "您选择了与所选 Amazon OpenSearch 域的 VPC 不同的 VPC。请确保 VPC 可以连接到所选 OpenSearch 域。", "vpc": "VPC", - "vpcDesc": "选择日志处理层的 VPC。 VPC 必须与通过 VPC 对等连接的集群为同一 VPC。", - "chooseVPC": "选择一个VPC", + "vpcDesc": "选择用于日志处理层的虚拟私有云 (VPC)。选择的 VPC 应与集群的 VPC 对齐,或者必须通过 VPC 对等连接或传输网关进行连接。", + "chooseVPC": "选择一个 VPC", "logProcessSubnetGroup": "日志处理子网组", - "logProcessSubnetGroupDesc": "至少选择两个日志处理层的子网。建议选择私有子网。", - "chooseSubnet": "选择子网", + "logProcessSubnetGroupDesc": "选择至少两个子网用于日志处理层。这些子网必须配置为私有,并设置路由以连接到 NAT 网关。", + "chooseSubnet": "选择两个子网", "logProcessSG": "日志处理安全组", - "logProcessSGDesc": "为日志处理层选择一个安全组。 安全组必须有权访问 OpenSearch 域。", - "chooseSG": "选择安全组" + "logProcessSGDesc": "选择一个用于日志处理层的安全组。此安全组必须配置为允许访问 OpenSearch 集群。如果连接经过 NAT 网关,则需要开放 443 端口。", + "chooseSG": "选择一个安全组" } }, "imported": { diff --git a/source/portal/public/locales/zh/ekslog.json b/source/portal/public/locales/zh/ekslog.json index 7337fea6..56263492 100644 --- a/source/portal/public/locales/zh/ekslog.json +++ b/source/portal/public/locales/zh/ekslog.json @@ -61,7 +61,7 @@ }, "daemonsetGuide": { "alert": "", - "alertDesc": "请确保您所创建的所有的摄取都处于已创建状态,然后再继续执行以下步骤。此过程中的所有步骤都是将 Fluent Bit 代理部署为 DaemonSet。 默认情况下,使用命名空间 'logging'", + "alertDesc": "请确保您所创建的所有的摄取都处于已创建状态,然后再继续执行以下步骤。此过程中的所有步骤都是将 Fluent Bit 代理部署为 DaemonSet。 默认情况下,使用命名空间 'logging'。
请注意,如果 YAML 文件发生了变更,那么您需要重启相关的 FluentBit Pod。", "step1": "您需要将以下所有内容复制并粘贴到文件 ~/fluent-bit-logging.yaml 中", "step2": "使用 kubectl 命令将 fluent-bit 部署为 DaemonSet。", "createIngestion": "请先创建一个摄取" }, diff --git a/source/portal/public/locales/zh/resource.json b/source/portal/public/locales/zh/resource.json index 7c8afcc3..61470c27 100644 --- a/source/portal/public/locales/zh/resource.json +++ b/source/portal/public/locales/zh/resource.json @@ -160,6 +160,7 @@ "apacheFormatError": "指定的格式无效,标准的 Apache 配置文件通常以 LogFormat 开头。", "regexFormat": "正则表达式", "regexFormatDesc": "输入日志的正则表达式。", + "regexFormatDescNginxApache": "输入正则表达式来解析日志。请不要更改正则表达式匹配组名称(日志字段)。", "sampleParsing": "示例日志解析", "parser": "解析器", "parserDesc": "选择支持的解析器。", diff --git a/source/portal/src/assets/js/const.ts b/source/portal/src/assets/js/const.ts index f2d8407a..b2c9c7e8 100644 --- a/source/portal/src/assets/js/const.ts +++ b/source/portal/src/assets/js/const.ts @@ -36,6 +36,7 @@ import { SyslogParser, } from "API"; import { OptionType } from "components/AutoComplete/autoComplete"; +import { StatusType } from "components/Status/Status"; export const INVALID = "invalid"; export const AUTO_REFRESH_INT = 8000; @@ -46,7 +47,13 @@ export const SOLUTION_REPO_NAME = "centralized-logging-with-opensearch"; export const LINUX_FLB_AGENT_VERSION = "FluentBit 1.9.10"; export const WINDOWS_FLB_AGENT_VERSION = "FluentBit 3.0.4 (Community)"; -export const getFLBVersionByType = (type?: EC2GroupPlatform) => { +export const getFLBVersionByType = ( + type?: EC2GroupPlatform, + status?: StatusType +) => { + if (status === StatusType.Unknown) { + return "-"; + } return type === EC2GroupPlatform.Windows ? WINDOWS_FLB_AGENT_VERSION : LINUX_FLB_AGENT_VERSION; diff --git a/source/portal/src/assets/js/request.ts b/source/portal/src/assets/js/request.ts index fe76358e..74dd1abe 100644 --- a/source/portal/src/assets/js/request.ts +++ b/source/portal/src/assets/js/request.ts @@ -120,7 +120,15 @@ export const appSyncRequestQuery = (query: any, params?: any): any => { resolve(decodedResData); } catch (error) { const showError: any = error; - if (showError?.networkError?.statusCode === 401) { + const headerElement = document.getElementById("cloSignedHeader"); + // skip the re-sign-in alert for 401 errors from GetMetricHistoryData + const r = /query\s(\w+)\s*\(/g; + const res: any = r.exec(query); + if ( + res?.[1] !== "GetMetricHistoryData" && + headerElement && + showError?.networkError?.statusCode === 401 + ) { Alert( i18n.t("signin.reSignInDesc"), i18n.t("signin.reSignIn"), diff --git a/source/portal/src/components/layout/header.tsx b/source/portal/src/components/layout/header.tsx index 841c01ab..91731cd2 100644 --- a/source/portal/src/components/layout/header.tsx +++ b/source/portal/src/components/layout/header.tsx @@ -37,7 +37,7 @@ const LHeader: React.FC = (props: SignedInAppProps) => { }; return ( -
+
{t("header.name")}
{
@@ -45,6 +45,7 @@ const LHeader: React.FC = (props: SignedInAppProps) => { {amplifyConfig.aws_appsync_authenticationType === AppSyncAuthType.OPEN_ID && ( { diff --git a/source/portal/src/pages/clusters/domain/DomainList.tsx b/source/portal/src/pages/clusters/domain/DomainList.tsx index ba072334..afb23088 100644 --- a/source/portal/src/pages/clusters/domain/DomainList.tsx +++ b/source/portal/src/pages/clusters/domain/DomainList.tsx @@ -72,7 +72,7 @@ const ESDomainList: React.FC = () => { const [curTipsDomain, setCurTipsDomain] = useState(); const [openDeleteModel, setOpenDeleteModel] = useState(false); const [loadingDelete, setLoadingDelete] = useState(false); - const [reverseOrKeep, setReverseOrKeep] = useState("unset"); + const [reverseOrKeep, setReverseOrKeep] = useState("keep"); const [removeCancel, setRemoveCancel] = useState(false); const [removeErrorMessage, setRemoveErrorMessage] = useState(); const [domainRelatedResources, setDomainRelatedResources] = useState< @@ -136,8 +136,23 @@ const ESDomainList: React.FC = () => { }); const domainRelevantResource: DomainRelevantResource[] = resData.data.getDomainDetails.resources; - console.info("domainRelevantResource: ", domainRelevantResource); - setDomainRelatedResources(domainRelevantResource); + // filter out resources whose values contain null or empty entries + const filteredDomainRelevantResources: DomainRelevantResource[] = []; + if (domainRelevantResource.length > 0) { + domainRelevantResource.forEach((element: DomainRelevantResource) => { + const valueHasNotNone = element?.values?.every( + (e) => e !== null && e !== "" && e !== "null" + ); + if ( + element?.values && + element?.values.length > 0 && + valueHasNotNone + ) { + filteredDomainRelevantResources.push({ ...element }); + } + }); + } + setDomainRelatedResources(filteredDomainRelevantResources); setLoadingResources(false); } catch (error) { setLoadingResources(false); @@ -364,7 +379,8 @@ const ESDomainList: React.FC = () => { loading={loadingDelete} btnType="primary" disabled={ - selectedDomains.length === 1 && reverseOrKeep === "unset" + (selectedDomains.length === 1 && reverseOrKeep === "unset") || + loadingResources } onClick={() => { if (!removeCancel) { @@ -429,29 +445,29 @@ const ESDomainList: React.FC = () => { )}
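{/* domainRelatedResources is pre-filtered in getDomainDetails above to drop entries whose values are null or empty, so the revert/keep choice below is only offered when meaningful resources exist. */}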
{/* For old version (v1.x), not allow customer to choose reverse changes */} - {!removeCancel && domainRelatedResources && ( -
+ {!removeCancel && domainRelatedResources.length > 0 && ( +
)} - {!removeCancel && ( -
+ {!removeCancel && domainRelatedResources.length > 0 && ( +
)} diff --git a/source/portal/src/pages/dataInjection/serviceLog/create/cloudfront/comps/SourceType.tsx b/source/portal/src/pages/dataInjection/serviceLog/create/cloudfront/comps/SourceType.tsx index 7b7b4739..33c80fa1 100644 --- a/source/portal/src/pages/dataInjection/serviceLog/create/cloudfront/comps/SourceType.tsx +++ b/source/portal/src/pages/dataInjection/serviceLog/create/cloudfront/comps/SourceType.tsx @@ -248,7 +248,9 @@ const SourceType: React.FC = (props: LogTypeProps) => { placeholder={t("servicelog:cloudfront.selectLogType")} className="m-w-45p" optionList={ - cloudFrontTask.logSourceAccountId || region.startsWith("cn") || standardOnly + cloudFrontTask.logSourceAccountId || + region.startsWith("cn") || + standardOnly ? CLOUDFRONT_LOG_STANDARD : CLOUDFRONT_LOG_TYPE } @@ -351,13 +353,21 @@ const SourceType: React.FC = (props: LogTypeProps) => { } > { - changeSamplingRate && - changeSamplingRate(event.target.value); + const newValue = event.target.value; + if (/^\d*$/.test(newValue)) { + const number = parseInt(newValue, 10); + if ( + (number >= 1 && number <= 100) || + newValue === "" + ) { + changeSamplingRate?.(event.target.value); + } + } }} /> diff --git a/source/portal/src/pages/lightEngine/grafana/importGrafana/UpdateGrafana.tsx b/source/portal/src/pages/lightEngine/grafana/importGrafana/UpdateGrafana.tsx index 51cb0105..b8591d85 100644 --- a/source/portal/src/pages/lightEngine/grafana/importGrafana/UpdateGrafana.tsx +++ b/source/portal/src/pages/lightEngine/grafana/importGrafana/UpdateGrafana.tsx @@ -46,7 +46,7 @@ export const UpdateGrafana: React.FC = () => { const url = queryParams.get("url"); const name = queryParams.get("name"); dispatch(grafana.actions.nameChanged(decodeURIComponent(name ?? ""))); - dispatch(grafana.actions.urlChanged(decodeURIComponent(url ?? ""))); + dispatch(grafana.actions.urlChanged(decodeURIComponent(url?.trim() ?? 
""))); }, [location.search]); const navigate = useNavigate(); diff --git a/source/portal/src/pages/lightEngine/grafana/importGrafana/steps/ConfigServer.tsx b/source/portal/src/pages/lightEngine/grafana/importGrafana/steps/ConfigServer.tsx index 0c1a8177..8721471f 100644 --- a/source/portal/src/pages/lightEngine/grafana/importGrafana/steps/ConfigServer.tsx +++ b/source/portal/src/pages/lightEngine/grafana/importGrafana/steps/ConfigServer.tsx @@ -39,13 +39,13 @@ export const ConfigServer = ( const changeUrl = (url: string) => { if (url.length <= MAX_INPUT_LENGTH) { - dispatch(grafana.actions.urlChanged(url)); + dispatch(grafana.actions.urlChanged(url.trim())); } }; const changeToken = (token: string) => { if (token.length <= MAX_INPUT_LENGTH) { - dispatch(grafana.actions.tokenChanged(token)); + dispatch(grafana.actions.tokenChanged(token.trim())); } }; diff --git a/source/portal/src/pages/resources/common/InstanceTable.tsx b/source/portal/src/pages/resources/common/InstanceTable.tsx index 938220e1..66004c15 100644 --- a/source/portal/src/pages/resources/common/InstanceTable.tsx +++ b/source/portal/src/pages/resources/common/InstanceTable.tsx @@ -47,7 +47,7 @@ import SelectPlatform from "./SelectPlatform"; const PAGE_SIZE = 50; const REFRESH_INTERVAL = 20000; // 20 seconds to refresh -interface InstanceItemType { +export interface InstanceItemType { computerName: string; id: string; ipAddress: string; @@ -55,19 +55,19 @@ interface InstanceItemType { platformName: string; } -interface ListInstanceResponse { +export interface ListInstanceResponse { instances: InstanceItemType[]; nextToken: string; } -interface InstanceStatusType { +export interface InstanceStatusType { curlOutput: string; instanceId: string; invocationOutput: string; status: string; } -interface CommandResponse { +export interface CommandResponse { commandId: string; instanceAgentStatusList: InstanceStatusType[]; } diff --git a/source/portal/src/pages/resources/common/LogConfigComp.tsx b/source/portal/src/pages/resources/common/LogConfigComp.tsx index bf38c315..62a0e454 100644 --- a/source/portal/src/pages/resources/common/LogConfigComp.tsx +++ b/source/portal/src/pages/resources/common/LogConfigComp.tsx @@ -41,6 +41,7 @@ import { getRegexTitle, iisLogParserChanged, isJSONType, + isNginxOrApache, isWindowsEvent, multiLineParserChanged, regexChanged, @@ -308,9 +309,12 @@ const LogConfigComp = (props: LogConfigCompProps) => { }} /> )} -
- {t("resource:config.common.regName") + logConfig.data.regex} -
+ {!isNginxOrApache(logConfig.data.logType) && ( +
+ {t("resource:config.common.regName") + + logConfig.data.regex} +
+ )}
)} diff --git a/source/portal/src/pages/resources/instanceGroup/comps/DetailEC2.tsx b/source/portal/src/pages/resources/instanceGroup/comps/DetailEC2.tsx index f1a466f9..42b54749 100644 --- a/source/portal/src/pages/resources/instanceGroup/comps/DetailEC2.tsx +++ b/source/portal/src/pages/resources/instanceGroup/comps/DetailEC2.tsx @@ -23,7 +23,7 @@ import { EC2GroupPlatform, } from "API"; import { SelectType, TablePanel } from "components/TablePanel"; -import Status from "components/Status/Status"; +import Status, { StatusType } from "components/Status/Status"; import { getFLBVersionByType } from "assets/js/const"; import { useTranslation } from "react-i18next"; import { buildEC2LInk, defaultStr } from "assets/js/utils"; @@ -35,6 +35,7 @@ import { getInstanceAgentStatus, listInstances } from "graphql/queries"; import { updateLogSource } from "graphql/mutations"; import Modal from "components/Modal"; import InstanceTable, { + CommandResponse, InstanceWithStatusType, } from "pages/resources/common/InstanceTable"; import { Alert, handleErrorMessage } from "assets/js/alert"; @@ -60,7 +61,6 @@ const DetailEC2: React.FC = (props: DetailEC2Props) => { InstanceWithStatusType[] >([]); const [loadingInstance, setLoadingInstance] = useState(false); - const [loadingRefresh, setLoadingRefresh] = useState(false); const [removeInstanceList, setRemoveInstanceList] = useState([]); const [checkedInstanceList, setCheckedInstanceList] = useState< @@ -72,43 +72,83 @@ const DetailEC2: React.FC = (props: DetailEC2Props) => { const [loadingAdd, setLoadingAdd] = useState(false); const getAllInstanceDetailAndStatus = async () => { + setLoadingInstance(true); + setInstanceInfoList([]); if ( instanceGroup.ec2?.instances && instanceGroup.ec2?.instances.length > 0 ) { - const tmpInstanceInfoList: InstanceWithStatusType[] = []; - setLoadingRefresh(true); - for (let i = 0; i < instanceGroup.ec2?.instances.length; i++) { - const accountId = defaultStr(instanceGroup?.accountId); - // Get a single instance info + const instanceIds = instanceGroup.ec2.instances.map( + (instance) => instance?.instanceId + ); + const chunkSize = 50; + // split instanceIds into chunks + const instanceChunks: (string | undefined)[][] = []; + for (let i = 0; i < instanceIds.length; i += chunkSize) { + instanceChunks.push(instanceIds.slice(i, i + chunkSize)); + } + const allInstanceInfo: InstanceWithStatusType[] = []; + for (const chunk of instanceChunks) { + // Get all instance info const dataInstanceInfo = await appSyncRequestQuery(listInstances, { maxResults: 50, nextToken: "", - accountId: accountId, + accountId: instanceGroup?.accountId, region: amplifyConfig.aws_project_region, - instanceSet: [instanceGroup.ec2?.instances[i]?.instanceId], + instanceSet: chunk, + }); + const instanceWithStatusList: InstanceWithStatusType[] = + dataInstanceInfo.data?.listInstances?.instances; + // ids of the instances that the query actually returned + const availableInstanceList = instanceWithStatusList?.map( + (item) => item.id + ); + // mark instances missing from the response as Unknown + const instanceWithUnknownList: InstanceWithStatusType[] = []; + chunk.forEach((chunkId) => { + const avaInstance = instanceWithStatusList?.find( + (item) => item.id === chunkId + ); + if (avaInstance) { + instanceWithUnknownList.push(avaInstance); + } else { + instanceWithUnknownList.push({ + computerName: "-", + id: chunkId, + ipAddress: "-", + name: "-", + platformName: "-", + status: StatusType.Unknown, + }); + } }); + // Get all instance status const statusData = await 
appSyncRequestQuery(getInstanceAgentStatus, { - instanceIds: [instanceGroup.ec2?.instances[i]?.instanceId], - accountId: accountId, + instanceIds: availableInstanceList, + accountId: instanceGroup?.accountId, }); - - const instanceStatusList = - statusData.data.getInstanceAgentStatus.instanceAgentStatusList; - - // Update tmpInstanceInfoList with instanceStatus - tmpInstanceInfoList.push({ - ...(dataInstanceInfo.data.listInstances?.instances?.[0] || { - id: instanceGroup.ec2?.instances[i]?.instanceId, - }), - status: dataInstanceInfo.data.listInstances?.instances?.[0] - ? instanceStatusList[0].status - : "Unknown", + const instanceStatusResp: CommandResponse = + statusData.data.getInstanceAgentStatus; + const updatedInstances = instanceWithUnknownList?.map((instance) => { + const statusUpdate = instanceStatusResp.instanceAgentStatusList.find( + (status) => status.instanceId === instance.id + ); + if (statusUpdate) { + return { + ...instance, + status: statusUpdate.status, + invocationOutput: statusUpdate.invocationOutput, + }; + } + return instance; }); + if (updatedInstances) { + allInstanceInfo.push(...updatedInstances); + } } - setLoadingRefresh(false); - setInstanceInfoList(tmpInstanceInfoList); + setInstanceInfoList(allInstanceInfo); + setLoadingInstance(false); } }; @@ -194,10 +234,7 @@ const DetailEC2: React.FC = (props: DetailEC2Props) => { instanceGroup.ec2?.instances.length > 0 ) { setLoadingInstance(true); - getAllInstanceDetailAndStatus().then(() => { - setLoadingInstance(false); - getAllInstanceDetailAndStatus(); - }); + getAllInstanceDetailAndStatus(); } }, [instanceGroup.ec2?.instances]); @@ -254,8 +291,11 @@ const DetailEC2: React.FC = (props: DetailEC2Props) => { { id: "agent", header: t("resource:group.detail.list.agent"), - cell: () => { - return getFLBVersionByType(instanceGroup.ec2?.groupPlatform); + cell: (e) => { + return getFLBVersionByType( + instanceGroup.ec2?.groupPlatform, + e.status + ); }, }, { @@ -292,18 +332,18 @@ const DetailEC2: React.FC = (props: DetailEC2Props) => { )}
} pagination={
} /> + { if (!logType) return ""; - if (isSingleLineText(logType)) { + if (isSingleLineText(logType) || isNginxOrApache(logType)) { return defaultStr(LOG_CONFIG_TYPE_MAP[logType].regexTitle); } return parser ? defaultStr(LOG_CONFIG_PARSER_MAP[parser].regexTitle) : ""; @@ -431,7 +435,7 @@ export const getRegexDescription = ( parser: ParserType ): string => { if (!logType) return ""; - if (isSingleLineText(logType)) { + if (isSingleLineText(logType) || isNginxOrApache(logType)) { return defaultStr(LOG_CONFIG_TYPE_MAP[logType].regexDescription); } return parser @@ -592,10 +596,11 @@ export const buildRegexFromNginxLog = ( if (hasGroup) { groupName = `?<${match.substring(1, match.length)}>`; } + console.info("match:match:", match); if (match === `$request`) { return `(?\\S+)\\s+(?\\S+)\\s+\\S+`; } else if (match.startsWith("$time")) { - return `(${groupName}\\d+/\\S+/\\d+:\\d+:\\d+:\\d+\\s+\\S+)`; + return `(${groupName}\\d+\\/\\S+\\/\\d+:\\d+:\\d+:\\d+\\s+\\S+)`; } else if (match.startsWith("$http")) { return `(${groupName}[^"]*)`; } else if (match.startsWith("$")) { @@ -608,7 +613,7 @@ export const buildRegexFromNginxLog = ( // 匹配到以 $ 符号开头的 const regExDollar = /\$[^\\\][\s$"]*/gm; function replaceSplitItems(match: any) { - match = match.replace("[", "\\[").replace("]", "\\]"); + match = match.replaceAll("[", "\\[").replaceAll("]", "\\]"); return match.replace(regExDollar, replaceDollarItems); } const afterReplaceSplit = logContentString.replace( @@ -1068,9 +1073,6 @@ export const validateRegex = (state: LogConfigState) => { export const validateSampleLog = (state: LogConfigState) => { // do not validate sample log if the type is apache or nginx - if (isNginxOrApache(state.data.logType)) { - return ""; - } if (!state.data.userSampleLog?.trim()) { if (state.data.logType === LogType.JSON) { return "resource:config.parsing.sampleLogJSONDesc"; @@ -1200,8 +1202,9 @@ const handleFormatChangeForNginx = ( const regexStr = buildRegexFromNginxLog(action.payload, true); if (action.payload && regexStr === INVALID) { state.logFormatError = "resource:config.common.nginxFormatInvalid"; + state.data.regex = ""; } else { - state.data.regex = regexStr; + state.data.regex = regexStr === INVALID ? "" : regexStr; } }; @@ -1212,8 +1215,9 @@ const handleFormatChangeForApache = ( const regexStr = buildRegexFromApacheLog(action.payload); if (action.payload && regexStr === INVALID) { state.logFormatError = "resource:config.common.apacheFormatError"; + state.data.regex = ""; } else { - state.data.regex = regexStr; + state.data.regex = regexStr === INVALID ? 
"" : regexStr; } }; @@ -1333,7 +1337,7 @@ export const logConfigSlice = createSlice({ } if (isNginxOrApache(action.payload.logType)) { state.showLogFormat = true; - state.showRegex = false; + state.showRegex = true; } else { state.showLogFormat = false; if (isSingleLineText(action.payload.logType)) { @@ -1451,7 +1455,9 @@ export const logConfigSlice = createSlice({ state.logFormatError = ""; if (state.data.logType === LogType.Nginx) { handleFormatChangeForNginx(state, action); + state.showRegex = true; } else if (state.data.logType === LogType.Apache) { + state.showRegex = true; handleFormatChangeForApache(state, action); } else if ( state.data.logType === LogType.Syslog && diff --git a/source/portal/src/router/AmplifyAppRouter.tsx b/source/portal/src/router/AmplifyAppRouter.tsx index 9b384433..e6ee9397 100644 --- a/source/portal/src/router/AmplifyAppRouter.tsx +++ b/source/portal/src/router/AmplifyAppRouter.tsx @@ -36,7 +36,10 @@ const AmplifyAppRouter: React.FC = () => { window.localStorage.removeItem(AMPLIFY_CONFIG_JSON); window.location.reload(); } else if (payload?.event === "tokenRefresh_failure") { - Alert(t("signin.reSignInDesc"), t("signin.reSignIn"), "warning", true); + const headerElement = document.getElementById("cloSignedHeader"); + if (headerElement) { + Alert(t("signin.reSignInDesc"), t("signin.reSignIn"), "warning", true); + } } else { Auth?.currentAuthenticatedUser() .then((authData: any) => {