Commit 2407e45 (0 parents)

Initial commit from challenge

20 files changed: +3521 -0 lines

.circleci/config.yml

Lines changed: 77 additions & 0 deletions
```yaml
version: 2
defaults: &defaults
  docker:
    - image: circleci/python:2.7-stretch-browsers
install_dependency: &install_dependency
  name: Installation of build and deployment dependencies.
  command: |
    sudo apt install jq
    sudo pip install awscli --upgrade
    sudo pip install docker-compose
    sudo curl -o /usr/local/bin/ecs-cli https://s3.amazonaws.com/amazon-ecs-cli/ecs-cli-linux-amd64-latest
    sudo chmod +x /usr/local/bin/ecs-cli

install_deploysuite: &install_deploysuite
  name: Installation of install_deploysuite.
  command: |
    git clone --branch v1.3 https://github.com/topcoder-platform/tc-deploy-scripts ../buildscript
    cp ./../buildscript/master_deploy.sh .
    cp ./../buildscript/buildenv.sh .
    cp ./../buildscript/awsconfiguration.sh .
run_build: &run_build
  name: Installation of build dependencies.
  command: ./build.sh

jobs:
  # Build & Deploy against development backend
  "build-dev":
    <<: *defaults

    steps:
      # Initialization.
      - checkout
      - setup_remote_docker
      - run: *install_dependency
      - run: *install_deploysuite
      - run: *run_build

      - deploy:
          command: |
            ./awsconfiguration.sh DEV
            source awsenvconf
            ./buildenv.sh -e DEV -b resource-processor-es_deployvar
            source buildenvvar
            ./master_deploy.sh -d ECS -e DEV -t latest -s dev-global-appvar,resource-processor-es_appvar -i resource-processor-es

  "build-prod":
    <<: *defaults
    steps:
      # Initialization.
      - checkout
      - setup_remote_docker
      - run: *install_dependency
      - run: *install_deploysuite
      - run: *run_build
      - deploy:
          command: |
            ./awsconfiguration.sh PROD
            source awsenvconf
            ./buildenv.sh -e PROD -b resource-processor-es_deployvar
            source buildenvvar
            ./master_deploy.sh -d ECS -e PROD -t latest -s prod-global-appvar,resource-processor-es_appvar -i resource-processor-es
workflows:
  version: 2
  build:
    jobs:
      # Development builds are executed on "develop" branch only.
      - "build-dev":
          context: org-global
          filters:
            branches:
              only: develop
      - "build-prod":
          context: org-global
          filters:
            branches:
              only: master
```

.dockerignore

Lines changed: 4 additions & 0 deletions
```
node_modules/
.env
coverage/
.nyc_output/
```

.gitignore

Lines changed: 7 additions & 0 deletions
```
.idea
node_modules
*.log
.DS_Store
.env
coverage
.nyc_output
```

Procfile

Lines changed: 1 addition & 0 deletions
```
worker: npm start
```

README.md

Lines changed: 188 additions & 0 deletions
# Topcoder - Resources Elasticsearch Processor

## Dependencies

- nodejs https://nodejs.org/en/ (v10)
- Kafka
- Elasticsearch 6.8.4
- Docker, Docker Compose

## Configuration

Configuration for the processor is at `config/default.js`.
The following parameters can be set in config files or in env variables:

- DISABLE_LOGGING: whether to disable logging; default value is false
- LOG_LEVEL: the log level; default value: 'debug'
- KAFKA_URL: comma-separated Kafka hosts; default value: 'localhost:9092'
- KAFKA_GROUP_ID: the Kafka group id; default value: 'resource-processor-es'
- KAFKA_CLIENT_CERT: Kafka connection certificate, optional; default value is undefined;
  if not provided, SSL is not used and a direct insecure connection is made;
  if provided, it can be either a path to the certificate file or the certificate content
- KAFKA_CLIENT_CERT_KEY: Kafka connection private key, optional; default value is undefined;
  if not provided, SSL is not used and a direct insecure connection is made;
  if provided, it can be either a path to the private key file or the private key content
- RESOURCE_CREATE_TOPIC: create resource Kafka topic, default value is 'challenge.action.resource.create'
- RESOURCE_DELETE_TOPIC: delete resource Kafka topic, default value is 'challenge.action.resource.delete'
- RESOURCE_ROLE_CREATE_TOPIC: create resource role Kafka topic, default value is 'challenge.action.resource.role.create'
- RESOURCE_ROLE_UPDATE_TOPIC: update resource role Kafka topic, default value is 'challenge.action.resource.role.update'
- ES.HOST: Elasticsearch host, default value is 'localhost:9200'
- ES.AWS_REGION: AWS region to be used if AWS ES is used, default value is 'us-east-1'
- ES.API_VERSION: Elasticsearch API version, default value is '6.8'
- ES.RESOURCE_INDEX: Elasticsearch index name for resources, default value is 'resources'
- ES.RESOURCE_TYPE: Elasticsearch index type for resources, default value is '_doc'
- ES.RESOURCE_ROLE_INDEX: Elasticsearch index name for resource roles, default value is 'resource_roles'
- ES.RESOURCE_ROLE_TYPE: Elasticsearch index type for resource roles, default value is '_doc'
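Given the `config/default.js` layout, these values are presumably resolved through the standard node-config lookup; a minimal sketch of reading them (the use of the `config` npm package is an assumption based on that directory layout):

```js
// read-config.js -- minimal sketch; assumes the `config` npm package,
// which resolves config/default.js (and config/production.js when
// NODE_ENV=production).
const config = require('config')

console.log(config.get('KAFKA_URL')) // 'localhost:9092' unless KAFKA_URL is set
console.log(config.get('ES.HOST'))   // nested keys use dotted paths
```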
Also note that there is a `/health` endpoint that checks the health of the app.
It is served by an Express server listening on the environment variable `PORT`, which is not part of the configuration file and needs to be passed as an environment variable.
The default health check port is 3000 if not set.
## Local Kafka setup

- `http://kafka.apache.org/quickstart` contains details on setting up and managing a Kafka server;
  the steps below set up a Kafka server on Mac; on Windows, use the bat commands in bin/windows instead
- download Kafka at `https://www.apache.org/dyn/closer.cgi?path=/kafka/1.1.0/kafka_2.11-1.1.0.tgz`
- extract the downloaded tgz file
- go to the extracted directory kafka_2.11-1.1.0
- start the ZooKeeper server:
  `bin/zookeeper-server-start.sh config/zookeeper.properties`
- in another terminal, go to the same directory and start the Kafka server:
  `bin/kafka-server-start.sh config/server.properties`
- note that the ZooKeeper server is at localhost:2181, and the Kafka server is at localhost:9092
- in another terminal, go to the same directory and create the topics:
  `bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic challenge.action.resource.create`
  `bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic challenge.action.resource.delete`
  `bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic challenge.action.resource.role.create`
  `bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic challenge.action.resource.role.update`
- verify that the topics are created:
  `bin/kafka-topics.sh --list --zookeeper localhost:2181`,
  it should list the created topics
- run the producer and then write a message into the console to send to the `challenge.action.resource.create` topic:
  `bin/kafka-console-producer.sh --broker-list localhost:9092 --topic challenge.action.resource.create`
  in the console, write messages, one message per line:
  `{ "topic": "challenge.action.resource.create", "originator": "topcoder-resources-api", "timestamp": "2019-02-16T00:00:00", "mime-type": "application/json", "payload": { "id": "173803d3-019e-4033-b1cf-d7205c7f774c", "challengeId": "123", "memberId": "456", "memberHandle": "tester", "roleId": "172803d3-019e-4033-b1cf-d7205c7f774a" } }`
- optionally, in another terminal in the same directory, start a consumer to view the messages:
  `bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic challenge.action.resource.create --from-beginning`
- writing/reading messages to/from the other topics is similar; a programmatic producer sketch follows this list
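For scripted testing, the same message can also be published from Node.js. The Kafka client used by the processor is not visible in this excerpt, so the library choice below (`no-kafka`, a common pick in Topcoder processors) is an assumption:

```js
// produce-test-message.js -- sketch; assumes the `no-kafka` client
// (npm i no-kafka). The processor's own Kafka library is not shown
// in this excerpt, so this is illustrative only.
const Kafka = require('no-kafka')

const producer = new Kafka.Producer({ connectionString: 'localhost:9092' })
const message = {
  topic: 'challenge.action.resource.create',
  originator: 'topcoder-resources-api',
  timestamp: '2019-02-16T00:00:00',
  'mime-type': 'application/json',
  payload: {
    id: '173803d3-019e-4033-b1cf-d7205c7f774c',
    challengeId: '123',
    memberId: '456',
    memberHandle: 'tester',
    roleId: '172803d3-019e-4033-b1cf-d7205c7f774a'
  }
}

producer.init()
  .then(() => producer.send({
    topic: message.topic,
    message: { value: JSON.stringify(message) } // the whole envelope is the Kafka value
  }))
  .then(() => producer.end())
  .catch((err) => console.error(err))
```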
## Elasticsearch setup

Just run `docker-compose up` in the `local` folder.
## Local deployment

- install dependencies: `npm i`
- run code lint check: `npm run lint`
- fix some code lint errors: `npm run lint:fix`
- initialize Elasticsearch, creating (or recreating, if present) the configured Elasticsearch indices: `npm run init-es` (see the sketch after this list)
- start the processor app: `npm start`
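The `init-es` script itself is not part of this excerpt; the sketch below shows the kind of index bootstrap it describes (drop and recreate the configured indices), assuming the legacy `elasticsearch` JS client that matches the ES 6.8 API version in the config:

```js
// init-es.js -- sketch of an index bootstrap; the repo's actual script is
// not shown in this excerpt. Assumes the legacy `elasticsearch` JS client.
const elasticsearch = require('elasticsearch')
const config = require('config')

const client = new elasticsearch.Client({
  host: config.ES.HOST,
  apiVersion: config.ES.API_VERSION
})

async function initIndex (index) {
  // delete the index if it already exists (404 is fine on a fresh cluster)
  await client.indices.delete({ index, ignore: [404] })
  await client.indices.create({ index })
  console.log(`created index ${index}`)
}

async function main () {
  await initIndex(config.ES.RESOURCE_INDEX)
  await initIndex(config.ES.RESOURCE_ROLE_INDEX)
}

main().catch((err) => { console.error(err); process.exit(1) })
```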
## Local Deployment with Docker

To run the Resources ES Processor using Docker, follow the steps below:

1. Navigate to the directory `docker`

2. Rename the file `sample.api.env` to `api.env`

3. Set the required AWS credentials in the file `api.env`

4. Once that is done, run the following command

```bash
docker-compose up
```

5. The first time you run the application, it will take some time to download the image and install the dependencies
## Verification

- set up the Kafka server, start Elasticsearch, initialize the Elasticsearch indices, and start the processor app
- start kafka-console-producer to write messages to the `challenge.action.resource.create` topic:
  `bin/kafka-console-producer.sh --broker-list localhost:9092 --topic challenge.action.resource.create`
- write message:
  `{ "topic": "challenge.action.resource.create", "originator": "topcoder-resources-api", "timestamp": "2019-02-16T00:00:00", "mime-type": "application/json", "payload": { "id": "173803d3-019e-4033-b1cf-d7205c7f774c", "challengeId": "123", "memberId": "456", "memberHandle": "tester", "roleId": "172803d3-019e-4033-b1cf-d7205c7f774a" } }`
- run command `npm run view-data resources 173803d3-019e-4033-b1cf-d7205c7f774c` to view the created data; you will see that the data was properly created (a sketch of the underlying ES calls follows this section):

```bash
info: Elasticsearch data:
info: {
    "id": "173803d3-019e-4033-b1cf-d7205c7f774c",
    "challengeId": "123",
    "memberId": "456",
    "memberHandle": "tester",
    "roleId": "172803d3-019e-4033-b1cf-d7205c7f774a"
}
info: Done!
```

- you may write invalid messages like:
  `{ "topic": "challenge.action.resource.create", "originator": "topcoder-resources-api", "timestamp": "2019-02-16T00:00:00", "mime-type": "application/json", "payload": { "challengeId": "123", "memberId": "456", "memberHandle": "tester", "roleId": "172803d3-019e-4033-b1cf-d7205c7f774a" } }`

  `{ "topic": "challenge.action.resource.create", "originator": "topcoder-resources-api", "timestamp": "abc", "mime-type": "application/json", "payload": { "id": "173803d3-019e-4033-b1cf-d7205c7f774c", "challengeId": "123", "memberId": "456", "memberHandle": "tester", "roleId": "172803d3-019e-4033-b1cf-d7205c7f774a" } }`

  `{ [ { abc`
- then in the app console, you will see error messages

- start kafka-console-producer to write messages to the `challenge.action.resource.delete` topic:
  `bin/kafka-console-producer.sh --broker-list localhost:9092 --topic challenge.action.resource.delete`

- write message to delete data:
  `{ "topic": "challenge.action.resource.delete", "originator": "topcoder-resources-api", "timestamp": "2019-02-16T00:00:00", "mime-type": "application/json", "payload": { "id": "173803d3-019e-4033-b1cf-d7205c7f774c", "challengeId": "123", "memberId": "456", "memberHandle": "tester", "roleId": "172803d3-019e-4033-b1cf-d7205c7f774a" } }`
- run command `npm run view-data resources 173803d3-019e-4033-b1cf-d7205c7f774c` to view the deleted data; you will see that the data was properly deleted:

```bash
info: The data is not found.
```

- start kafka-console-producer to write messages to the `challenge.action.resource.role.create` topic:
  `bin/kafka-console-producer.sh --broker-list localhost:9092 --topic challenge.action.resource.role.create`

- write message to create data:
  `{ "topic": "challenge.action.resource.role.create", "originator": "topcoder-resources-api", "timestamp": "2019-02-16T00:00:00", "mime-type": "application/json", "payload": { "id": "171803d3-019e-4033-b1cf-d7215c7f123a", "name": "role1", "fullAccess": true, "isActive": true, "selfObtainable": false } }`
- run command `npm run view-data resource_roles 171803d3-019e-4033-b1cf-d7215c7f123a` to view the created data; you will see that the data was properly created:

```bash
info: Elasticsearch data:
info: {
    "id": "171803d3-019e-4033-b1cf-d7215c7f123a",
    "name": "role1",
    "fullAccess": true,
    "isActive": true,
    "selfObtainable": false
}
info: Done!
```

- start kafka-console-producer to write messages to the `challenge.action.resource.role.update` topic:
  `bin/kafka-console-producer.sh --broker-list localhost:9092 --topic challenge.action.resource.role.update`

- write message to update data:
  `{ "topic": "challenge.action.resource.role.update", "originator": "topcoder-resources-api", "timestamp": "2019-02-16T00:00:00", "mime-type": "application/json", "payload": { "id": "171803d3-019e-4033-b1cf-d7215c7f123a", "name": "role2", "fullAccess": false, "isActive": true, "selfObtainable": true } }`
- run command `npm run view-data resource_roles 171803d3-019e-4033-b1cf-d7215c7f123a` to view the updated data; you will see that the data was properly updated:

```bash
info: Elasticsearch data:
info: {
    "id": "171803d3-019e-4033-b1cf-d7215c7f123a",
    "name": "role2",
    "fullAccess": false,
    "isActive": true,
    "selfObtainable": true
}
info: Done!
```

- to test the health check API,
  run `export PORT=5000` (the default port is 3000 if not set),
  start the processor,
  then browse `http://localhost:5000/health` in a browser,
  and you will see the result `{"checksRun":1}`
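The service code that handles these messages is not part of this excerpt. As a rough sketch of what the create, delete, and update paths plausibly do against Elasticsearch (assuming the legacy `elasticsearch` JS client and the index/type names from `config/default.js`):

```js
// handlers.js -- sketch of the processor's likely ES operations; the actual
// service code is not included in this commit excerpt, so treat the shape
// of these functions as an assumption.
const elasticsearch = require('elasticsearch')
const config = require('config')

const client = new elasticsearch.Client({
  host: config.ES.HOST,
  apiVersion: config.ES.API_VERSION
})

// challenge.action.resource.create -> index a new resource document by id
async function createResource (payload) {
  await client.create({
    index: config.ES.RESOURCE_INDEX,
    type: config.ES.RESOURCE_TYPE,
    id: payload.id,
    body: payload
  })
}

// challenge.action.resource.delete -> remove the document by id
async function deleteResource (payload) {
  await client.delete({
    index: config.ES.RESOURCE_INDEX,
    type: config.ES.RESOURCE_TYPE,
    id: payload.id
  })
}

// challenge.action.resource.role.update -> partial update by id
async function updateResourceRole (payload) {
  await client.update({
    index: config.ES.RESOURCE_ROLE_INDEX,
    type: config.ES.RESOURCE_ROLE_TYPE,
    id: payload.id,
    body: { doc: payload }
  })
}

module.exports = { createResource, deleteResource, updateResourceRole }
```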

build.sh

Lines changed: 13 additions & 0 deletions
```bash
#!/bin/bash
set -eo pipefail
#ENV=$1
#AWS_ACCOUNT_ID=$(eval "echo \$${ENV}_AWS_ACCOUNT_ID")
#AWS_REGION=$(eval "echo \$${ENV}_AWS_REGION")
#AWS_REPOSITORY=$(eval "echo \$${ENV}_AWS_REPOSITORY")

# Builds Docker image of the app.
#TAG=$AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$AWS_REPOSITORY:$CIRCLE_BUILD_NUM
#sed -i='' "s|resource-processor-es:latest|$TAG|" docker/docker-compose.yml
echo "" > docker/api.env
docker-compose -f docker/docker-compose.yml build resource-processor-es
docker images
```

config/default.js

Lines changed: 30 additions & 0 deletions
```js
/**
 * The default configuration file.
 */

module.exports = {
  DISABLE_LOGGING: process.env.DISABLE_LOGGING ? process.env.DISABLE_LOGGING === 'true' : false, // If true, logging will be disabled
  LOG_LEVEL: process.env.LOG_LEVEL || 'debug',

  KAFKA_URL: process.env.KAFKA_URL || 'localhost:9092',
  KAFKA_GROUP_ID: process.env.KAFKA_GROUP_ID || 'resource-processor-es',
  // below are used for secure Kafka connection, they are optional
  // for the local Kafka, they are not needed
  KAFKA_CLIENT_CERT: process.env.KAFKA_CLIENT_CERT,
  KAFKA_CLIENT_CERT_KEY: process.env.KAFKA_CLIENT_CERT_KEY,

  RESOURCE_CREATE_TOPIC: process.env.RESOURCE_CREATE_TOPIC || 'challenge.action.resource.create',
  RESOURCE_DELETE_TOPIC: process.env.RESOURCE_DELETE_TOPIC || 'challenge.action.resource.delete',
  RESOURCE_ROLE_CREATE_TOPIC: process.env.RESOURCE_ROLE_CREATE_TOPIC || 'challenge.action.resource.role.create',
  RESOURCE_ROLE_UPDATE_TOPIC: process.env.RESOURCE_ROLE_UPDATE_TOPIC || 'challenge.action.resource.role.update',

  ES: {
    HOST: process.env.ES_HOST || 'localhost:9200',
    AWS_REGION: process.env.AWS_REGION || 'us-east-1', // AWS Region to be used if we use AWS ES
    API_VERSION: process.env.ES_API_VERSION || '6.8',
    RESOURCE_INDEX: process.env.RESOURCE_INDEX || 'resources',
    RESOURCE_TYPE: process.env.RESOURCE_TYPE || '_doc',
    RESOURCE_ROLE_INDEX: process.env.RESOURCE_ROLE_INDEX || 'resource_roles',
    RESOURCE_ROLE_TYPE: process.env.RESOURCE_ROLE_TYPE || '_doc'
  }
}
```

config/production.js

Lines changed: 7 additions & 0 deletions
```js
/**
 * Production configuration file
 */

module.exports = {
  LOG_LEVEL: process.env.LOG_LEVEL || 'info'
}
```
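Under node-config (again an assumption based on the `config/` layout), this file is merged over `config/default.js` when `NODE_ENV=production`; a small sketch of the resolution:

```js
// merge-demo.js -- sketch of node-config resolution; assumes the `config`
// npm package, which the config/ directory layout implies.
process.env.NODE_ENV = 'production'
const config = require('config')

console.log(config.get('LOG_LEVEL')) // 'info' from production.js, unless LOG_LEVEL is set
console.log(config.get('ES.HOST'))   // 'localhost:9200', inherited from default.js
```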

docker/Dockerfile

Lines changed: 12 additions & 0 deletions
```dockerfile
# Use the base image with Node.js
FROM node:10.16

# Copy the current directory into the Docker image
COPY . /resource-processor-es

# Set working directory for future use
WORKDIR /resource-processor-es

# Install the dependencies from package.json
RUN npm install
CMD npm start
```

docker/docker-compose.yml

Lines changed: 10 additions & 0 deletions
```yaml
version: '3'
services:
  resource-processor-es:
    image: resource-processor-es:latest
    build:
      context: ../
      dockerfile: docker/Dockerfile
    env_file:
      - api.env
    network_mode: "host"
```

docker/sample.api.env

Lines changed: 3 additions & 0 deletions
```
AWS_ACCESS_KEY_ID=<AWS Access Key ID>
AWS_SECRET_ACCESS_KEY=<AWS Secret Access Key>
ES_HOST=<ES Host Endpoint>
```

local/docker-compose.yml

Lines changed: 6 additions & 0 deletions
```yaml
version: '3'
services:
  esearch:
    image: "elasticsearch:6.8.4"
    ports:
      - "9200:9200"
```