Configure Opensearch on AWS

This commit adds the CloudFormation files for the
current OpenStack AWS OpenSearch cluster, and the
associated Logstash servers and load balancer.

Change-Id: I4ff2aecb668279f8aabb8d72ffd6f4a2c960ed89
This commit is contained in:
Reed Tomlinson 2022-01-25 20:46:01 -06:00 committed by daniel.pawlik
parent 7fcc5517b8
commit bff3f150b8
11 changed files with 807 additions and 0 deletions

View File

@ -0,0 +1,33 @@
# Logstash image for shipping OpenStack CI logs to AWS OpenSearch.
# Based on these instructions: https://opensearch.org/docs/latest/clients/logstash/index/#docker
# Once the Log4J vulnerability is fixed in the OpenSearch Docker repo, upgrade to 7.16.1
# or later of the bundled image and switch back to:
#   FROM opensearchproject/logstash-oss-with-opensearch-output-plugin:7.13.2
# See:
# https://github.com/opensearch-project/logstash-output-opensearch/issues/107
# https://github.com/opensearch-project/logstash-output-opensearch/issues/108
# In the meantime, we start from Logstash 7.16.1 and add the
# logstash-output-opensearch plugin manually.
FROM docker.elastic.co/logstash/logstash-oss:7.16.1

# Install all required plugins in a single layer (one plugin-manager
# invocation instead of four is faster and keeps the image history small),
# then remove the stock pipeline configs shipped with the base image.
RUN /usr/share/logstash/bin/logstash-plugin install \
        logstash-output-opensearch \
        logstash-input-sqs \
        logstash-filter-json \
        logstash-filter-multiline \
    && rm -f /usr/share/logstash/config/*.conf

# COPY is preferred over ADD for plain local files (no implicit URL fetch or
# archive extraction). The numeric prefixes fix the concatenation order below.
# COPY config/logstash.yml /usr/share/logstash/config/logstash.yml
COPY config/input.conf /usr/share/logstash/config/00-input.conf
COPY config/openstack-filters.conf /usr/share/logstash/config/50-openstack-logstash-filters.conf
COPY config/output.conf /usr/share/logstash/config/99-output.conf

# Assemble the single pipeline file from its numbered stages, with a blank
# line between stages, in one layer instead of five.
RUN { cat /usr/share/logstash/config/00-input.conf; \
      echo ""; \
      cat /usr/share/logstash/config/50-openstack-logstash-filters.conf; \
      echo ""; \
      cat /usr/share/logstash/config/99-output.conf; \
    } > /usr/share/logstash/config/pipeline.conf

# Exec-form CMD so Logstash runs as PID 1 and receives stop signals directly.
# --config.reload.automatic lets a mounted pipeline.conf be hot-reloaded.
CMD ["bin/logstash", "--log.level", "debug", "-f", "config/pipeline.conf", "--config.reload.automatic"]

View File

@ -0,0 +1,44 @@
# About
This folder contains CloudFormation configurations for an AWS OpenSearch cluster and a set of Logstash servers behind a load balancer.
# Usage
You'll need appropriate AWS permissions (to create and monitor resources). Put AWS credentials in `~/.aws/credentials` and run `deploy_opensearch.sh`.
# After Creation
OpenSearch users
* Create a user with username 'logstash' and the entered password in OpenSearch, and assign it the "logstash" role.
* Create a user with username 'readonly' and password 'opensearch-readonly-PUBLIC-2021!' in OpenSearch, and grant it read-only privileges. Give it access to the Global tenant.
In the OpenSearch Dashboard select `Index Management`, `State management policies`, and then `Create Policy`. Make a policy with the following policy statement:
```
{
"policy_id": "DeleteAllDataAfter14Days",
"description": "Delete all data after 14 days",
"last_updated_time": 1639608774297,
"schema_version": 1,
"error_notification": null,
"default_state": "hot",
"states": [
{
"name": "hot",
"actions": [],
"transitions": [
{
"state_name": "delete",
"conditions": {
"min_index_age": "14d"
}
}
]
},
{
"name": "delete",
"actions": [],
"transitions": []
}
],
"ism_template": null
}
```
This will delete all indices that are at least 14 days old (e.g. the `logstash-logs-2021.12.15` index will be deleted on 2021-12-29).

View File

@ -0,0 +1,7 @@
# Logstash input stage. The Dockerfile copies this file to 00-input.conf and
# concatenates it first into pipeline.conf.
input {
  # Plain TCP listener on 9999; logstash_cluster.yaml maps container port 9999
  # and forwards the load balancer's 9999 listener to it.
  tcp {
    port => 9999
    # One JSON document per newline-terminated line.
    codec => json_lines {}
    # Sets the event's "type" field; these events come from Jenkins.
    type => "jenkins"
  }
}

View File

@ -0,0 +1 @@
# Bind address for the Logstash HTTP API (port 9600 health endpoint).
# NOTE(review): "instance-local-ip" is a literal placeholder string here, and
# the ADD of this file in the Dockerfile is commented out — presumably it is
# substituted/mounted at deploy time; confirm how this file reaches the image.
http.host: "instance-local-ip"

View File

@ -0,0 +1,110 @@
# You can check grok patterns at http://grokdebug.herokuapp.com/
# NOTE: the rules have been taken from:
# https://opendev.org/openstack/logstash-filters/src/branch/master/filters/openstack-filters.conf
# The "multiline" filter used below is not bundled with Logstash 7.x; the
# Dockerfile installs logstash-filter-multiline explicitly.
filter {
  # Drop shell trace lines ("+ cmd") from devstack screen logs.
  if "screen" in [tags] and [message] =~ "^\+ " {
    drop {}
  }
  if "console" in [tags] or "console.html" in [tags] {
    # Strip the bare <pre> wrapper lines around HTML console logs.
    if [message] == "<pre>" or [message] == "</pre>" {
      drop {}
    }
    # Fold continuation lines (anything not starting "TIMESTAMP |") into the
    # previous event.
    # NOTE(review): "%(unknown)" is not valid Logstash sprintf syntax (field
    # references are "%{name}") — this looks like a mangled field reference;
    # confirm the intended field against the upstream filters file. Same on
    # the two stream_identity settings below.
    multiline {
      negate => true
      pattern => "^%{TIMESTAMP_ISO8601} \|"
      what => "previous"
      stream_identity => "%{host}.%(unknown)"
    }
    grok {
      # Do multiline matching as the above multiline filter may add newlines
      # to the log messages.
      match => { "message" => "(?m)^%{TIMESTAMP_ISO8601:logdate} \| %{GREEDYDATA:logmessage}" }
      add_field => { "received_at" => "%{@timestamp}" }
    }
  } else if "oslofmt" in [tags] {
    # oslo.log format: fold lines that do not start with a timestamp.
    multiline {
      negate => true
      pattern => "^(%{TIMESTAMP_ISO8601}|%{SYSLOGTIMESTAMP}) "
      what => "previous"
      stream_identity => "%{host}.%(unknown)"
    }
    multiline {
      negate => false
      # NOTE(mriedem): oslo.log 1.2.0 changed the logging_exception_prefix
      # config option from using TRACE to ERROR so we have to handle both.
      #
      # NOTE(sdague): stack traces always include process id, so
      # NUMBER being required element here is important, otherwise
      # ERROR messages just fold into the previous messages, which are
      # typically INFO.
      pattern => "^(%{TIMESTAMP_ISO8601}|%{SYSLOGTIMESTAMP})%{SPACE}%{NUMBER}%{SPACE}(TRACE|ERROR)"
      what => "previous"
      stream_identity => "%{host}.%(unknown)"
    }
    grok {
      # Do multiline matching as the above multiline filter may add newlines
      # to the log messages.
      # TODO move the LOGLEVELs into a proper grok pattern.
      match => { "message" => "(?m)^(%{TIMESTAMP_ISO8601:logdate}|%{SYSLOGTIMESTAMP:logdate})%{SPACE}(%{DATA:syslog_program}(?:\[%{POSINT:syslog_pid}\])?:|%{NUMBER:pid})?%{SPACE}?(?<loglevel>AUDIT|CRITICAL|DEBUG|INFO|TRACE|WARNING|ERROR) \[?\b%{NOTSPACE:module}\b\]?%{SPACE}?%{GREEDYDATA:logmessage}?" }
      add_field => { "received_at" => "%{@timestamp}" }
    }
  } else if "apachecombined" in [tags] {
    grok {
      match => { "message" => "%{COMBINEDAPACHELOG}" }
      add_field => { "received_at" => "%{@timestamp}" }
      add_field => { "logdate" => "%{timestamp}" }
      add_field => { "logmessage" => "%{verb} %{request} %{response}" }
    }
  } else if "apacheerror" in [tags] {
    grok {
      match => { "message" => "\[(?<logdate>%{DAY} %{MONTH} %{MONTHDAY} %{TIME} %{YEAR}%{SPACE}%{TZ}?)\]%{SPACE}\[%{LOGLEVEL:loglevel}\]%{SPACE}%{GREEDYDATA:logmessage}" }
      add_field => { "received_at" => "%{@timestamp}" }
    }
  } else if "libvirt" in [tags] {
    grok {
      # libvirtd grok filter adapted from
      # https://github.com/OpenStratus/openstack-logstash/blob/master/agent.conf
      match => { "message" => "%{TIMESTAMP_ISO8601:logdate}:%{SPACE}%{NUMBER:pid}:%{SPACE}%{LOGLEVEL:loglevel}%{SPACE}:%{SPACE}%{GREEDYDATA:logmessage}" }
      add_field => { "received_at" => "%{@timestamp}" }
    }
  } else if "syslog" in [tags] {
    grok {
      # Syslog grok filter adapted from
      # http://cookbook.logstash.net/recipes/syslog-pri/syslog.conf
      match => { "message" => "%{SYSLOGTIMESTAMP:logdate}%{SPACE}%{SYSLOGHOST:syslog_host}?%{SPACE}%{DATA:syslog_program}(?:\[%{POSINT:syslog_pid}\])?:? %{GREEDYDATA:logmessage}" }
      add_field => { "received_at" => "%{@timestamp}" }
    }
  }
  # Filters below here should be consistent for all Jenkins log formats.
  # Remove DEBUG logs to reduce the amount of data that needs to be processed.
  if [loglevel] == "DEBUG" {
    drop {}
  }
  if ! ("_grokparsefailure" in [tags]) {
    # Parse the extracted "logdate" into the event's @timestamp.
    date {
      match => [ "logdate",
                 "yyyy-MM-dd HH:mm:ss.SSS",
                 "yyyy-MM-dd HH:mm:ss.SSSSSS",
                 "yyyy-MM-dd HH:mm:ss,SSS",
                 "yyyy-MM-dd HH:mm:ss",
                 "MMM d HH:mm:ss",
                 "MMM dd HH:mm:ss",
                 "MMM dd HH:mm:ss.SSSSSS",
                 "dd/MMM/yyyy:HH:mm:ss Z",
                 "yyyy-MM-dd HH:mm:ss.SSSZ",
                 "E MMM dd HH:mm:ss yyyy Z",
                 "E MMM dd HH:mm:ss yyyy",
                 "ISO8601"
               ]
      timezone => "UTC"
    }
    # Replace the raw line with the parsed message body, then drop the
    # intermediate fields.
    mutate {
      replace => { "message" => "%{logmessage}" }
    }
    mutate {
      remove_field => [ "logdate", "logmessage" ]
    }
  }
}

View File

@ -0,0 +1,12 @@
# Logstash output stage: ship events to the AWS OpenSearch cluster over HTTPS.
# The Dockerfile copies this file to 99-output.conf so it is concatenated last
# into pipeline.conf.
output {
  opensearch {
    # Custom domain endpoint configured in opensearch.yaml (HTTPS on 443).
    hosts => "opensearch.logs.openstack.org:443"
    # Index templates are not managed by Logstash.
    manage_template => false
    timeout => 300
    user => "logstash"
    # Placeholder only: deploy_opensearch.sh substitutes the real password via
    # sed before the docker build and restores this placeholder afterwards.
    password => "DO-NOT-COMMIT-TO-VERSION-CONTROL"
    # One index per day; the ISM policy described in the README deletes
    # indices after 14 days.
    index => "logstash-logs-%{+YYYY.MM.dd}"
    ssl => true
    ssl_certificate_verification => true
  }
}

View File

@ -0,0 +1,15 @@
# Manual test pipeline: read JSON events from stdin and write them to a test
# AWS-managed OpenSearch domain (not the production custom endpoint).
input {
  stdin {
    codec => json
  }
}
output {
  opensearch {
    # NOTE(review): unlike config/output.conf this targets port 9200 and sets
    # no explicit `ssl => true` — confirm this matches the test domain's
    # endpoint configuration.
    hosts => "search-test-cloudformation-domain-2-ee5yrmisesnnmrhq6ngpszfgvq.us-east-1.es.amazonaws.com:9200"
    user => "logstash"
    # Placeholder; replace manually when testing (see deploy_opensearch.sh for
    # how the production value is injected).
    password => "DO-NOT-COMMIT-TO-VERSION-CONTROL"
    index => "logstash-logs-%{+YYYY.MM.dd}"
    ssl_certificate_verification => true
  }
}

View File

@ -0,0 +1,72 @@
# Rebuilds and redeploys the whole stack: ECR repo + secret, Logstash Docker
# image, OpenSearch cluster, and the Logstash ECS cluster.
# NOTE(review): no shebang or `set -euo pipefail` is visible in this hunk, so
# a failed aws/docker command does not stop the script — confirm and consider
# adding them.
# Delete stack if it exists
echo "Deleting ECR stack..."
aws cloudformation delete-stack --stack-name ecr-stack
# Stack operations are asynchronous; the timed reads below are manual gates an
# operator can skip by pressing enter once the console shows completion.
echo "Waiting 2 minutes (press enter to continue)..."
read -t 120 NullVariable
# Create stack
echo ""
echo "Creating ECR stack..."
aws cloudformation create-stack --stack-name ecr-stack --template-body file://ecr.yaml
echo "Waiting 60 minutes (press enter to continue)..."
read -t 3600 NullVariable
# Deploy stack
echo ""
echo "Deploying ECR stack..."
aws cloudformation deploy --stack-name ecr-stack --template-file ecr.yaml
# Get logstash password from user:
echo "Enter desired password for Logstash user (the OpenSearch user account that Logstash will use to write to OpenSearch)."
echo "Must NOT include these characters ()\"&|![]"
# NOTE(review): `read -p` echoes the password to the terminal (consider
# `read -s`), and the sed substitutions below break if the password contains
# `/` or `\` — neither is in the forbidden-character list printed above. The
# password is also briefly visible in the process list while sed runs.
read -p "Password: " logstashPassword
# Write this password to config/output.conf (we'll overwrite it at the end of this file)
# Note that the -i (in-place) option doesn't work on MacOS, so we write to a temporary file and then move it
sed "s/password => \"DO-NOT-COMMIT-TO-VERSION-CONTROL\"/password => \"$logstashPassword\"/g" config/output.conf > config/tmp.conf
mv config/tmp.conf config/output.conf
# Build Docker image for Logstash
docker build -t openstack-logstash-repository . --no-cache
# Erase password from config/output.conf
sed "s/password => \"$logstashPassword\"/password => \"DO-NOT-COMMIT-TO-VERSION-CONTROL\"/g" config/output.conf > config/tmp.conf
mv config/tmp.conf config/output.conf
# ECR login
aws ecr get-login-password --region us-east-1 | docker login --username AWS --password-stdin 035559393697.dkr.ecr.us-east-1.amazonaws.com
# Tag the image
docker tag openstack-logstash-repository:latest 035559393697.dkr.ecr.us-east-1.amazonaws.com/openstack-logstash-repository:latest
# Push the image
docker push 035559393697.dkr.ecr.us-east-1.amazonaws.com/openstack-logstash-repository:latest
# If you push a new version of this Docker container, you can deploy it to an existing ECS stack like so:
# aws ecs update-service --cluster pre-prod-Cluster --service pre-prod-ECSService --force-new-deployment
# (replace "pre-prod-Cluster" and "pre-prod-ECSService" with the correct cluster/stack names). This is a zero-downtime operation.
# Delete stacks if they exist
echo "Deleting Logstash stack..."
aws cloudformation delete-stack --stack-name logstashstack
echo "Waiting 60 minutes (press enter to continue)..."
read -t 3600 NullVariable
echo "Deleting Opensearch stack..."
aws cloudformation delete-stack --stack-name opensearchteststack
echo "Waiting 60 minutes (press enter to continue)..."
read -t 3600 NullVariable
# Create OpenSearch Cluster stack
echo ""
echo "Creating Opensearch stack..."
aws cloudformation create-stack --stack-name opensearchteststack --template-body file://opensearch.yaml --parameters ParameterKey=SSHKey,ParameterValue=aws-keypair-2021-03-22 --capabilities CAPABILITY_NAMED_IAM
echo "Waiting 60 minutes (press enter to continue)..."
read -t 3600 NullVariable
# Create Logstash Cluster stack
# (logstash_cluster.yaml imports the VPC/subnet exports created by
# opensearch.yaml, hence the ordering)
aws cloudformation create-stack --stack-name logstashstack --template-body file://logstash_cluster.yaml --capabilities CAPABILITY_NAMED_IAM
echo "Final steps:"
echo " * Create a user with username 'logstash' and the entered password in OpenSearch, and assign it the \"logstash\" role"
echo " * Create a user with username 'readonly' and password 'opensearch-readonly-PUBLIC-2021!' in OpenSearch, and grant it read-only privileges"

View File

@ -0,0 +1,32 @@
# ECR repository for the Logstash image, plus the Secrets Manager secret that
# holds the OpenSearch admin credentials. Deployed first by
# deploy_opensearch.sh; opensearch.yaml imports the secret export below.
AWSTemplateFormatVersion: "2010-09-09"
Description: ECR repo for Logstash containers
Resources:
  LogstashECRRepo:
    Type: AWS::ECR::Repository
    Properties:
      RepositoryName: "openstack-logstash-repository"
  # Auto-generated admin password; opensearch.yaml resolves it through a
  # {{resolve:secretsmanager:...}} dynamic reference via the export below.
  OpenSearchAdminCredentialsSecret:
    Type: AWS::SecretsManager::Secret
    Properties:
      Description: 'Password for admin account in OpenSearch'
      GenerateSecretString:
        SecretStringTemplate: '{"username": "admin"}'
        GenerateStringKey: 'password'
        PasswordLength: 32
        # Exclusions presumably keep the generated value quoting-safe in the
        # dynamic reference — TODO confirm.
        ExcludeCharacters: '"@/\'
Outputs:
  LogstashECRRepoArn:
    Value: !GetAtt LogstashECRRepo.Arn
    Export:
      Name: LogstashECRRepoArn
  OpenSearchAdminCredentialsSecret:
    Value: !Ref OpenSearchAdminCredentialsSecret
    Export:
      Name: OpenSearchAdminCredentialsSecret

View File

@ -0,0 +1,197 @@
# ECS (Fargate) cluster running the Logstash image behind a network load
# balancer. Imports VPC/subnet exports created by opensearch.yaml.
# NOTE(review): the Description mentions an Auto Scaling Group, but the
# resources below launch Fargate tasks and no ASG is defined in this view —
# the description looks stale; confirm.
Description:
  This template deploys an ECS cluster to the provided VPC and subnets
  using an Auto Scaling Group
Parameters:
  Stage:
    Type: String
    Default: pre-prod
    Description: Deployment stage
Resources:
  Cluster:
    Type: AWS::ECS::Cluster
    Properties:
      ClusterName: !Join ['-', [!Ref Stage, 'Cluster']]
  # Single role used as both task role and execution role; it carries the
  # AWS-managed ECS task execution policy.
  ECSExecutionRole:
    Type: AWS::IAM::Role
    Properties:
      RoleName: !Join ['-', [!Ref Stage, 'ExecutionRole']]
      AssumeRolePolicyDocument:
        Statement:
          - Effect: Allow
            Principal:
              Service: ecs-tasks.amazonaws.com
            Action: 'sts:AssumeRole'
      ManagedPolicyArns:
        - 'arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy'
  # Create a TaskDefinition with container details
  TaskDefinition:
    Type: AWS::ECS::TaskDefinition
    DependsOn: LogGroup
    Properties:
      # 4GB memory
      Memory: 4096
      # 2 VCPUs
      Cpu: 2048
      NetworkMode: awsvpc
      RequiresCompatibilities:
        - 'FARGATE'
      TaskRoleArn: !Ref ECSExecutionRole
      ExecutionRoleArn: !Ref ECSExecutionRole
      ContainerDefinitions:
        # TODO: Make this a param, or get it from outputs...
        # 9600 = Logstash HTTP API, 9999 = TCP log input (see config/input.conf).
        - Name: !Join ['-', [!Ref Stage, 'Container']]
          Image: 035559393697.dkr.ecr.us-east-1.amazonaws.com/openstack-logstash-repository:latest
          PortMappings:
            - ContainerPort: 9600
              HostPort: 9600
            - ContainerPort: 9999
              HostPort: 9999
          LogConfiguration:
            LogDriver: awslogs
            Options:
              awslogs-region: !Ref AWS::Region
              awslogs-group: !Ref LogGroup
              awslogs-stream-prefix: ecs
  # # Create a TaskDefinition with container details
  # TaskDefinition:
  #   Type: AWS::ECS::TaskDefinition
  #   Properties:
  #     Memory: 1024
  #     Cpu: 512
  #     NetworkMode: awsvpc
  #     RequiresCompatibilities:
  #       - 'FARGATE'
  #     TaskRoleArn: !Ref ExecutionRole
  #     ExecutionRoleArn: !Ref ExecutionRole
  #     ContainerDefinitions:
  #       - Name: !Join ['-', [!Ref Stage, !Ref 'AWS::AccountId', 'Container']]
  #         Image: !Ref ImageURI
  #         PortMappings:
  #           - ContainerPort: !Ref ContainerPort
  #             HostPort: !Ref ContainerPort
  # Create a security group for load balancer and open ports 9600 and 9999 in-bound from internet
  # NOTE(review): nothing active references this group — the SecurityGroups
  # block on the load balancer below is commented out — so it is currently
  # unused; confirm before relying on it.
  LoadBalancerSecurityGroup:
    Type: AWS::EC2::SecurityGroup
    Properties:
      GroupDescription: !Join ['-', [!Ref Stage, 'LoadBalancerSecurityGroup']]
      VpcId: {Fn::ImportValue: VPC}
      SecurityGroupIngress:
        - IpProtocol: tcp
          FromPort: 9600
          ToPort: 9600
          CidrIp: 0.0.0.0/0
        - IpProtocol: tcp
          FromPort: 9999
          ToPort: 9999
          CidrIp: 0.0.0.0/0
  # Create a security group for Containers and open-in bound Container ports from Load balancer security group to the Container
  # NOTE(review): both rules are open to 0.0.0.0/0, with the LB-source
  # alternative commented out — the containers are directly reachable from the
  # internet on 9600/9999; tighten if possible.
  ContainerSecurityGroup:
    Type: AWS::EC2::SecurityGroup
    Properties:
      GroupDescription: !Join ['-', [!Ref Stage, 'ContainerSecurityGroup']]
      VpcId: {Fn::ImportValue: VPC}
      SecurityGroupIngress:
        - IpProtocol: tcp
          FromPort: 9600
          ToPort: 9600
          CidrIp: 0.0.0.0/0
          # SourceSecurityGroupId: !Ref LoadBalancerSecurityGroup
        - IpProtocol: tcp
          FromPort: 9999
          ToPort: 9999
          CidrIp: 0.0.0.0/0
          # SourceSecurityGroupId: !Ref LoadBalancerSecurityGroup
  # Create a LoadBalancer and attach the Security group and Subnets
  # (Type: network — presumably SecurityGroups is commented out because NLBs
  # did not support security groups at the time; confirm.)
  LoadBalancer:
    Type: AWS::ElasticLoadBalancingV2::LoadBalancer
    Properties:
      IpAddressType: ipv4
      Scheme: internet-facing
      # SecurityGroups:
      #   - !Ref LoadBalancerSecurityGroup
      Subnets:
        - {Fn::ImportValue: SubnetPublic1}
        - {Fn::ImportValue: SubnetPublic2}
      Type: network
  # Create a TargetGroup for TCP port 9600
  TargetGroup9600:
    Type: AWS::ElasticLoadBalancingV2::TargetGroup
    Properties:
      Port: 9600
      Protocol: TCP
      TargetType: ip
      VpcId: {Fn::ImportValue: VPC}
  # Create a TargetGroup for TCP port 9999
  TargetGroup9999:
    Type: AWS::ElasticLoadBalancingV2::TargetGroup
    Properties:
      Port: 9999
      Protocol: TCP
      TargetType: ip
      VpcId: {Fn::ImportValue: VPC}
  # Create a LoadBalancerListener for port 9600 and attach the TargetGroup and LoadBalancer
  LoadBalancerListener9600:
    Type: AWS::ElasticLoadBalancingV2::Listener
    Properties:
      DefaultActions:
        - TargetGroupArn: !Ref TargetGroup9600
          Type: forward
      LoadBalancerArn: !Ref LoadBalancer
      Port: 9600
      Protocol: TCP
  # Create a LoadBalancerListener for port 9999 and attach the TargetGroup and LoadBalancer
  LoadBalancerListener9999:
    Type: AWS::ElasticLoadBalancingV2::Listener
    Properties:
      DefaultActions:
        - TargetGroupArn: !Ref TargetGroup9999
          Type: forward
      LoadBalancerArn: !Ref LoadBalancer
      Port: 9999
      Protocol: TCP
  # Create an ECS Service and add created Cluster, TaskDefintion, Subnets, TargetGroup and SecurityGroup
  ECSService:
    Type: AWS::ECS::Service
    DependsOn: [LoadBalancerListener9600, LoadBalancerListener9999]
    Properties:
      ServiceName: !Join ['-', [!Ref Stage, 'ECSService']]
      Cluster: !Ref Cluster
      TaskDefinition: !Ref TaskDefinition
      # Fixed fleet of 15 Fargate tasks; no autoscaling resource in this view.
      DesiredCount: 15
      LaunchType: FARGATE
      NetworkConfiguration:
        AwsvpcConfiguration:
          AssignPublicIp: ENABLED
          Subnets:
            - {Fn::ImportValue: SubnetPublic1}
            - {Fn::ImportValue: SubnetPublic2}
          SecurityGroups:
            - !Ref ContainerSecurityGroup
      LoadBalancers:
        - ContainerName: !Join ['-', [!Ref Stage, 'Container']]
          ContainerPort: 9600
          TargetGroupArn: !Ref TargetGroup9600
        - ContainerName: !Join ['-', [!Ref Stage, 'Container']]
          ContainerPort: 9999
          TargetGroupArn: !Ref TargetGroup9999
  # Log group named "/ecs/<stage>TaskDefinition"; the task definition above
  # depends on it so the awslogs driver has somewhere to write.
  LogGroup:
    Type: AWS::Logs::LogGroup
    Properties:
      LogGroupName: !Join ['', [/ecs/, !Ref Stage, TaskDefinition]]

View File

@ -0,0 +1,284 @@
# Networking (VPC, public/private subnets) plus the AWS OpenSearch domain and
# its KMS key. Exports at the bottom are imported by logstash_cluster.yaml.
# NOTE(review): several account-specific values are hard-coded below (account
# id 035559393697, the ACM certificate ARN); consider parameterizing — the
# commented-out AccountNumber parameter suggests this was started.
AWSTemplateFormatVersion: '2010-09-09'
Parameters:
  # NOTE(review): InstanceTypeParameter and SSHKey are not referenced by any
  # resource visible in this view (the hunk header indicates more lines than
  # shown here) — confirm they are used before removing.
  InstanceTypeParameter:
    Type: String
    Default: t3a.medium
    Description: Enter instance size. Default is t3a.medium.
  SSHKey:
    Type: String
    Description: The key used to access the instance.
  OpenSearchDomainName:
    Type: String
    Default: openstack-prod-cluster
    Description: Name for OpenSearch cluster
  OpenSearchCustomEndpoint:
    Type: String
    Default: opensearch.logs.openstack.org
    Description: Custom endpoint for OpenSearch cluster
  # AccountNumber:
  #   Type: String
  #   Default: ''
  #   Description: Account number for the AWS account we're deploying into
Resources:
  #############################################
  #
  # Networking resources
  #
  #############################################
  VPC:
    Type: AWS::EC2::VPC
    Properties:
      CidrBlock: 10.0.0.0/16
      EnableDnsSupport: true
      EnableDnsHostnames: true
      InstanceTenancy: default
      Tags:
        - Key: Name
          Value: Opensearch VPC
  InternetGateway:
    Type: AWS::EC2::InternetGateway
  VPCGatewayAttachment:
    Type: AWS::EC2::VPCGatewayAttachment
    Properties:
      VpcId: !Ref 'VPC'
      InternetGatewayId: !Ref 'InternetGateway'
  # Two public + two private subnets across us-east-1a/b (the OpenSearch
  # domain below uses two-AZ zone awareness).
  SubnetPublic1:
    Type: AWS::EC2::Subnet
    Properties:
      AvailabilityZone: us-east-1a
      VpcId: !Ref 'VPC'
      CidrBlock: 10.0.0.0/24
      MapPublicIpOnLaunch: true
  SubnetPrivate1:
    Type: AWS::EC2::Subnet
    Properties:
      AvailabilityZone: us-east-1a
      VpcId: !Ref 'VPC'
      CidrBlock: 10.0.1.0/24
      MapPublicIpOnLaunch: false
  SubnetPublic2:
    Type: AWS::EC2::Subnet
    Properties:
      AvailabilityZone: us-east-1b
      VpcId: !Ref 'VPC'
      CidrBlock: 10.0.2.0/24
      MapPublicIpOnLaunch: true
  SubnetPrivate2:
    Type: AWS::EC2::Subnet
    Properties:
      AvailabilityZone: us-east-1b
      VpcId: !Ref 'VPC'
      CidrBlock: 10.0.3.0/24
      MapPublicIpOnLaunch: false
  RouteTable:
    Type: AWS::EC2::RouteTable
    Properties:
      VpcId: !Ref 'VPC'
  InternetRoute:
    Type: AWS::EC2::Route
    DependsOn: VPCGatewayAttachment
    Properties:
      DestinationCidrBlock: '0.0.0.0/0'
      GatewayId: !Ref 'InternetGateway'
      RouteTableId: !Ref 'RouteTable'
  # Attach Public Route to Public Subnets
  SubnetPublic1PublicRouteAssociation:
    Type: AWS::EC2::SubnetRouteTableAssociation
    Properties:
      RouteTableId: !Ref RouteTable
      SubnetId: !Ref SubnetPublic1
  SubnetPublic2PublicRouteAssociation:
    Type: AWS::EC2::SubnetRouteTableAssociation
    Properties:
      RouteTableId: !Ref RouteTable
      SubnetId: !Ref SubnetPublic2
  # SubnetPublicRouteTableAssociation:
  #   Type: AWS::EC2::SubnetRouteTableAssociation
  #   Properties:
  #     RouteTableId: !Ref 'RouteTable'
  #     SubnetId: !Ref 'SubnetPublic'
  #############################################
  #
  # OpenSearch Cluster and related resources
  #
  #############################################
  KMSKey:
    Type: AWS::KMS::Key
    Properties:
      Description: KMS Key for OpenSearch encryption at rest
      Enabled: true
      EnableKeyRotation: false # TODO: Can we rotate the key if it's used for encryption at rest?
      KeyPolicy:
        Id: key-consolepolicy-3
        Version: '2012-10-17'
        Statement:
          # Root account retains full control of the key.
          - Sid: Enable IAM User Permissions
            Effect: Allow
            Principal:
              AWS: arn:aws:iam::035559393697:root
            Action: kms:*
            Resource: '*'
          - Sid: Allow access for Key Administrators
            Effect: Allow
            Principal:
              AWS: arn:aws:iam::035559393697:user/opensearch-kms-principal
            Action:
              - kms:Create*
              - kms:Describe*
              - kms:Enable*
              - kms:List*
              - kms:Put*
              - kms:Update*
              - kms:Revoke*
              - kms:Disable*
              - kms:Get*
              - kms:Delete*
              - kms:TagResource
              - kms:UntagResource
              - kms:ScheduleKeyDeletion
              - kms:CancelKeyDeletion
              - kms:ReplicateKey
              - kms:UpdatePrimaryRegion
            Resource: '*'
          - Sid: Allow use of the key
            Effect: Allow
            Principal:
              AWS: arn:aws:iam::035559393697:user/opensearch-kms-principal
            Action:
              - kms:Encrypt
              - kms:Decrypt
              - kms:ReEncrypt*
              - kms:GenerateDataKey*
              - kms:DescribeKey
            Resource: '*'
          - Sid: Allow attachment of persistent resources
            Effect: Allow
            Principal:
              AWS: arn:aws:iam::035559393697:user/opensearch-kms-principal
            Action:
              - kms:CreateGrant
              - kms:ListGrants
              - kms:RevokeGrant
            Resource: '*'
            Condition:
              Bool:
                kms:GrantIsForAWSResource: 'true'
      KeySpec: SYMMETRIC_DEFAULT
      MultiRegion: true
  # We need to create a Service-Linked Role for OpenSearch so that it can perform certain actions (like assigning a domain endpoint with a
  # certificate to our cluster). Simply creating it is enough, it doesn't have to be assigned anywhere.
  # TODO: This requires certain permissions for the user account that's creating the CloudFormation stack, see https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html#service-linked-role-permissions
  OpensearchSLR:
    Type: 'AWS::IAM::ServiceLinkedRole'
    Properties:
      AWSServiceName: es.amazonaws.com
      Description: Service-linked role for OpenSearch
  OpenSearchCluster:
    Type: AWS::OpenSearchService::Domain
    Properties:
      # NOTE(review): Principal '*' allows es:ESHttp* from anywhere; access is
      # gated only by the fine-grained access control enabled below — confirm
      # this exposure is intended.
      AccessPolicies:
        Version: '2012-10-17'
        Statement:
          - Effect: Allow
            Principal:
              AWS: '*'
            Action: es:ESHttp*
            Resource: !Join ['', ['arn:aws:es:us-east-1:', !Ref 'AWS::AccountId', ':domain/', !Ref OpenSearchDomainName, '/*']]
      AdvancedSecurityOptions:
        # Enable fine-grained access control
        Enabled: true
        # Create an internal user database for username/password authentication
        InternalUserDatabaseEnabled: true
        MasterUserOptions:
          # Get username and password from Secret Manager (using resource exported from ecr.yaml)
          MasterUserName: !Join ['', ['{{resolve:secretsmanager:', Fn::ImportValue: !Sub OpenSearchAdminCredentialsSecret, ':SecretString:username}}' ]]
          MasterUserPassword: !Join ['', ['{{resolve:secretsmanager:', Fn::ImportValue: !Sub OpenSearchAdminCredentialsSecret, ':SecretString:password}}' ]]
      ClusterConfig:
        DedicatedMasterEnabled: true
        DedicatedMasterCount: 3
        DedicatedMasterType: m6g.xlarge.search
        # For zone-aware, this has to be an even number:
        InstanceCount: 8
        InstanceType: m6g.xlarge.search
        WarmEnabled: false
        ZoneAwarenessEnabled: true
        ZoneAwarenessConfig:
          AvailabilityZoneCount: 2
      DomainEndpointOptions:
        CustomEndpointEnabled: true
        CustomEndpoint: !Ref OpenSearchCustomEndpoint
        CustomEndpointCertificateArn: arn:aws:acm:us-east-1:035559393697:certificate/1f23d574-2e92-4f5a-8a16-29859ae63c42
        EnforceHTTPS: true
        # Require TLS1.2 (alternative is 1.0 which is insecure)
        TLSSecurityPolicy: Policy-Min-TLS-1-2-2019-07
      DomainName:
        Ref: OpenSearchDomainName
      EBSOptions:
        EBSEnabled: true
        VolumeSize: 50
        VolumeType: gp2
      EncryptionAtRestOptions:
        Enabled: true
        KmsKeyId: !Ref 'KMSKey'
      # As of 2021-12-14, "OpenSearch_1.0" is the only valid option. Immediately after deploying,
      # upgrade via the AWS Console to at least version 1.2.1 (which uses Log4j version 2.15.0
      # and patches Log4Shell vulnerabilities)
      EngineVersion: OpenSearch_1.0
      NodeToNodeEncryptionOptions:
        Enabled: true
    # Allow upgrades to newest version of OpenSearch without replacing the entire cluster
    UpdatePolicy:
      EnableVersionUpgrade: true
Outputs:
  VPC:
    Value: !Ref VPC
    Description: VPC (imported in logstash_cluster.yaml)
    Export:
      Name: VPC
  SubnetPublic1:
    Value: !Ref SubnetPublic1
    Description: SubnetPublic1 (imported in logstash_cluster.yaml)
    Export:
      Name: SubnetPublic1
  SubnetPrivate1:
    Value: !Ref SubnetPrivate1
    Description: SubnetPrivate1 (imported in logstash_cluster.yaml)
    Export:
      Name: SubnetPrivate1
  SubnetPublic2:
    Value: !Ref SubnetPublic2
    Description: SubnetPublic2 (imported in logstash_cluster.yaml)
    Export:
      Name: SubnetPublic2
  SubnetPrivate2:
    Value: !Ref SubnetPrivate2
    Description: SubnetPrivate2 (imported in logstash_cluster.yaml)
    Export:
      Name: SubnetPrivate2