From 31b2306e537002d8858ddcfa6c6dc69c95a5a4e0 Mon Sep 17 00:00:00 2001 From: Andrea Amorosi Date: Tue, 27 Jun 2023 16:46:02 +0000 Subject: [PATCH 01/24] chore: init workspace --- package-lock.json | 11 ++++ package.json | 3 +- packages/batch/jest.config.js | 28 ++++++++++ packages/batch/package.json | 53 +++++++++++++++++++ packages/batch/src/BatchProcessor.ts | 3 ++ packages/batch/src/index.ts | 1 + .../helpers/populateEnvironmentVariables.ts | 12 +++++ .../batch/tests/unit/BatchProcessor.test.ts | 26 +++++++++ packages/batch/tsconfig-dev.json | 11 ++++ packages/batch/tsconfig.es.json | 11 ++++ packages/batch/tsconfig.json | 29 ++++++++++ packages/batch/typedoc.json | 12 +++++ 12 files changed, 199 insertions(+), 1 deletion(-) create mode 100644 packages/batch/jest.config.js create mode 100644 packages/batch/package.json create mode 100644 packages/batch/src/BatchProcessor.ts create mode 100644 packages/batch/src/index.ts create mode 100644 packages/batch/tests/helpers/populateEnvironmentVariables.ts create mode 100644 packages/batch/tests/unit/BatchProcessor.test.ts create mode 100644 packages/batch/tsconfig-dev.json create mode 100644 packages/batch/tsconfig.es.json create mode 100644 packages/batch/tsconfig.json create mode 100644 packages/batch/typedoc.json diff --git a/package-lock.json b/package-lock.json index fd6e4550dc..6ea8ab66cc 100644 --- a/package-lock.json +++ b/package-lock.json @@ -15,6 +15,7 @@ "packages/tracer", "packages/parameters", "packages/idempotency", + "packages/batch", "docs/snippets", "layers", "examples/cdk", @@ -6519,6 +6520,10 @@ ], "license": "MIT" }, + "node_modules/batch": { + "resolved": "packages/batch", + "link": true + }, "node_modules/before-after-hook": { "version": "2.2.3", "resolved": "https://registry.npmjs.org/before-after-hook/-/before-after-hook-2.2.3.tgz", @@ -17921,6 +17926,12 @@ "node": ">= 10" } }, + "packages/batch": { + "name": "@aws-lambda-powertools/batch", + "version": "1.10.0", + "license": "MIT-0", + 
"devDependencies": {} + }, "packages/commons": { "name": "@aws-lambda-powertools/commons", "version": "1.10.0", diff --git a/package.json b/package.json index 467365c53e..73bd6845da 100644 --- a/package.json +++ b/package.json @@ -12,7 +12,8 @@ "docs/snippets", "layers", "examples/cdk", - "examples/sam" + "examples/sam", + "packages/batch" ], "scripts": { "init-environment": "husky install", diff --git a/packages/batch/jest.config.js b/packages/batch/jest.config.js new file mode 100644 index 0000000000..3db7c7a6da --- /dev/null +++ b/packages/batch/jest.config.js @@ -0,0 +1,28 @@ +module.exports = { + displayName: { + name: 'Powertools for AWS Lambda (TypeScript) utility: BATCH', + color: 'orange', + }, + runner: 'groups', + preset: 'ts-jest', + transform: { + '^.+\\.ts?$': 'ts-jest', + }, + moduleFileExtensions: ['js', 'ts'], + collectCoverageFrom: ['**/src/**/*.ts', '!**/node_modules/**'], + testMatch: ['**/?(*.)+(spec|test).ts'], + roots: ['/src', '/tests'], + testPathIgnorePatterns: ['/node_modules/'], + testEnvironment: 'node', + coveragePathIgnorePatterns: ['/node_modules/', '/types/'], + coverageThreshold: { + global: { + statements: 100, + branches: 100, + functions: 100, + lines: 100, + }, + }, + coverageReporters: ['json-summary', 'text', 'lcov'], + setupFiles: ['/tests/helpers/populateEnvironmentVariables.ts'], +}; diff --git a/packages/batch/package.json b/packages/batch/package.json new file mode 100644 index 0000000000..44e9b37964 --- /dev/null +++ b/packages/batch/package.json @@ -0,0 +1,53 @@ +{ + "name": "@aws-lambda-powertools/batch", + "version": "1.10.0", + "description": "The batch processing package for the Powertools for AWS Lambda (TypeScript) library.", + "author": { + "name": "Amazon Web Services", + "url": "https://aws.amazon.com" + }, + "private": true, + "scripts": { + "test": "npm run test:unit", + "test:unit": "jest --group=unit --detectOpenHandles --coverage --verbose", + "test:e2e:nodejs14x": "echo 'Not Implemented'", + 
"test:e2e:nodejs16x": "echo 'Not Implemented'", + "test:e2e:nodejs18x": "echo 'Not Implemented'", + "test:e2e": "echo 'Not Implemented'", + "watch": "jest --watch", + "build": "tsc", + "lint": "eslint --ext .ts,.js --no-error-on-unmatched-pattern .", + "lint-fix": "eslint --fix --ext .ts,.js --no-error-on-unmatched-pattern .", + "prebuild": "rimraf ./lib", + "prepack": "node ../../.github/scripts/release_patch_package_json.js ." + }, + "lint-staged": { + "*.ts": "npm run lint-fix", + "*.js": "npm run lint-fix" + }, + "homepage": "https://github.com/aws-powertools/powertools-lambda-typescript/tree/main/packages/batch#readme", + "license": "MIT-0", + "main": "./lib/index.js", + "types": "./lib/index.d.ts", + "files": [ + "lib" + ], + "repository": { + "type": "git", + "url": "git+https://github.com/aws-powertools/powertools-lambda-typescript.git" + }, + "bugs": { + "url": "https://github.com/aws-powertools/powertools-lambda-typescript/issues" + }, + "dependencies": {}, + "keywords": [ + "aws", + "lambda", + "powertools", + "batch", + "batch-processing", + "serverless", + "nodejs" + ], + "devDependencies": {} +} diff --git a/packages/batch/src/BatchProcessor.ts b/packages/batch/src/BatchProcessor.ts new file mode 100644 index 0000000000..11979d8db4 --- /dev/null +++ b/packages/batch/src/BatchProcessor.ts @@ -0,0 +1,3 @@ +class BatchProcessor {} + +export { BatchProcessor }; diff --git a/packages/batch/src/index.ts b/packages/batch/src/index.ts new file mode 100644 index 0000000000..715736b461 --- /dev/null +++ b/packages/batch/src/index.ts @@ -0,0 +1 @@ +export * from './BatchProcessor'; diff --git a/packages/batch/tests/helpers/populateEnvironmentVariables.ts b/packages/batch/tests/helpers/populateEnvironmentVariables.ts new file mode 100644 index 0000000000..cb0b37f295 --- /dev/null +++ b/packages/batch/tests/helpers/populateEnvironmentVariables.ts @@ -0,0 +1,12 @@ +// Reserved variables +process.env._X_AMZN_TRACE_ID = 
'1-abcdef12-3456abcdef123456abcdef12'; +process.env.AWS_LAMBDA_FUNCTION_NAME = 'my-lambda-function'; +process.env.AWS_EXECUTION_ENV = 'nodejs18.x'; +process.env.AWS_LAMBDA_FUNCTION_MEMORY_SIZE = '128'; +if ( + process.env.AWS_REGION === undefined && + process.env.CDK_DEFAULT_REGION === undefined +) { + process.env.AWS_REGION = 'eu-west-1'; +} +process.env._HANDLER = 'index.handler'; diff --git a/packages/batch/tests/unit/BatchProcessor.test.ts b/packages/batch/tests/unit/BatchProcessor.test.ts new file mode 100644 index 0000000000..f411976325 --- /dev/null +++ b/packages/batch/tests/unit/BatchProcessor.test.ts @@ -0,0 +1,26 @@ +/** + * Test BatchProcessor class + * + * @group unit/batch/class/batchprocessor + */ +import { BatchProcessor } from '../../src'; + +describe('Class: IdempotencyConfig', () => { + const ENVIRONMENT_VARIABLES = process.env; + + beforeEach(() => { + jest.clearAllMocks(); + jest.resetModules(); + process.env = { ...ENVIRONMENT_VARIABLES }; + }); + + afterAll(() => { + process.env = ENVIRONMENT_VARIABLES; + }); + + describe('remove me', () => { + test('does stuff', () => { + expect(BatchProcessor).toBeDefined(); + }); + }); +}); diff --git a/packages/batch/tsconfig-dev.json b/packages/batch/tsconfig-dev.json new file mode 100644 index 0000000000..6f766859ea --- /dev/null +++ b/packages/batch/tsconfig-dev.json @@ -0,0 +1,11 @@ +{ + "extends": "./tsconfig.json", + "compilerOptions": { + "declarationMap": true, + "esModuleInterop": false + }, + "include": [ "src/**/*", "examples/**/*", "**/tests/**/*" ], + "types": [ + "jest" + ] +} \ No newline at end of file diff --git a/packages/batch/tsconfig.es.json b/packages/batch/tsconfig.es.json new file mode 100644 index 0000000000..6f766859ea --- /dev/null +++ b/packages/batch/tsconfig.es.json @@ -0,0 +1,11 @@ +{ + "extends": "./tsconfig.json", + "compilerOptions": { + "declarationMap": true, + "esModuleInterop": false + }, + "include": [ "src/**/*", "examples/**/*", "**/tests/**/*" ], + "types": [ + 
"jest" + ] +} \ No newline at end of file diff --git a/packages/batch/tsconfig.json b/packages/batch/tsconfig.json new file mode 100644 index 0000000000..09df4b9ba4 --- /dev/null +++ b/packages/batch/tsconfig.json @@ -0,0 +1,29 @@ +{ + "compilerOptions": { + "experimentalDecorators": true, + "noImplicitAny": true, + "target": "ES2020", + "module": "commonjs", + "declaration": true, + "outDir": "lib", + "strict": true, + "inlineSourceMap": true, + "moduleResolution": "node", + "resolveJsonModule": true, + "pretty": true, + "baseUrl": "src/", + "rootDirs": [ "src/" ], + "esModuleInterop": true + }, + "include": [ "src/**/*" ], + "exclude": [ "./node_modules"], + "watchOptions": { + "watchFile": "useFsEvents", + "watchDirectory": "useFsEvents", + "fallbackPolling": "dynamicPriority" + }, + "lib": [ "es2020" ], + "types": [ + "node" + ] +} \ No newline at end of file diff --git a/packages/batch/typedoc.json b/packages/batch/typedoc.json new file mode 100644 index 0000000000..b983e7c7b4 --- /dev/null +++ b/packages/batch/typedoc.json @@ -0,0 +1,12 @@ +{ + "extends": [ + "../../typedoc.base.json" + ], + "entryPoints": [ + "./src/index.ts", + "./src/types/index.ts", + "./src/middleware/index.ts", + "./src/persistence/index.ts" + ], + "readme": "README.md" +} \ No newline at end of file From a3dddbf774f53971a7f5492bc5f7307f9a626c2e Mon Sep 17 00:00:00 2001 From: Andrea Amorosi Date: Tue, 27 Jun 2023 16:46:38 +0000 Subject: [PATCH 02/24] chore: init workspace --- packages/batch/README.md | 0 packages/batch/typedoc.json | 5 +---- 2 files changed, 1 insertion(+), 4 deletions(-) create mode 100644 packages/batch/README.md diff --git a/packages/batch/README.md b/packages/batch/README.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/packages/batch/typedoc.json b/packages/batch/typedoc.json index b983e7c7b4..ed0ca6fc47 100644 --- a/packages/batch/typedoc.json +++ b/packages/batch/typedoc.json @@ -3,10 +3,7 @@ "../../typedoc.base.json" ], "entryPoints": [ - 
"./src/index.ts", - "./src/types/index.ts", - "./src/middleware/index.ts", - "./src/persistence/index.ts" + "./src/index.ts" ], "readme": "README.md" } \ No newline at end of file From d87b8953a0c5fa8d020a9d46d097b59f2daf80da Mon Sep 17 00:00:00 2001 From: erikayao93 Date: Fri, 30 Jun 2023 17:38:56 +0000 Subject: [PATCH 03/24] Initial base class implementation --- .../batch/src/BasePartialBatchProcessor.ts | 148 ++++++++++++++++++ packages/batch/src/BasePartialProcessor.ts | 125 +++++++++++++++ packages/batch/src/constants.ts | 11 ++ packages/batch/src/errors.ts | 19 +++ packages/batch/src/index.ts | 5 + packages/batch/src/types.ts | 31 ++++ 6 files changed, 339 insertions(+) create mode 100644 packages/batch/src/BasePartialBatchProcessor.ts create mode 100644 packages/batch/src/BasePartialProcessor.ts create mode 100644 packages/batch/src/constants.ts create mode 100644 packages/batch/src/errors.ts create mode 100644 packages/batch/src/types.ts diff --git a/packages/batch/src/BasePartialBatchProcessor.ts b/packages/batch/src/BasePartialBatchProcessor.ts new file mode 100644 index 0000000000..6f968b578b --- /dev/null +++ b/packages/batch/src/BasePartialBatchProcessor.ts @@ -0,0 +1,148 @@ +import { DynamoDBRecord, KinesisStreamRecord, SQSRecord } from 'aws-lambda'; +import { + BasePartialProcessor, + BaseRecord, + BatchProcessingError, + DEFAULT_RESPONSE, + DynamoDBRecordType, + EventSourceDataClassTypes, + EventType, + KinesisStreamRecordType, + SQSRecordType, +} from '.'; + +abstract class BasePartialBatchProcessor extends BasePartialProcessor { + public COLLECTOR_MAPPING; + + public batchResponse: { [key: string]: { [key: string]: string }[] }; + + public eventType: EventType; + + /** + * Process batch and partially report failed items + * @param eventType Whether this is SQS, DynamoDB stream, or Kinesis data stream event + */ + public constructor(eventType: EventType) { + super(); + this.eventType = eventType; + this.batchResponse = DEFAULT_RESPONSE; // need to 
find deep clone alternative here + this.COLLECTOR_MAPPING = { + [EventType.SQS]: this.collectSqsFailures(), + [EventType.KinesisDataStreams]: this.collectKinesisFailures(), + [EventType.DynamoDBStreams]: this.collectDynamoDBFailures(), + }; + } + + /** + * Report messages to be deleted in case of partial failures + */ + public clean(): void { + if (!this.hasMessagesToReport()) { + return; + } + + if (this.entireBatchFailed()) { + throw new BatchProcessingError( + 'All records failed processing. ' + + this.exceptions.length + + ' individual errors logged separately below.', + this.exceptions + ); + } + + const messages: { [key: string]: string }[] = this.getMessagesToReport(); + this.batchResponse = { batchItemFailures: messages }; + } + + public collectDynamoDBFailures(): { [key: string]: string }[] { + const failures: { [key: string]: string }[] = []; + + this.failureMessages.forEach((msg) => { + const msgId = (msg as DynamoDBRecord).dynamodb?.SequenceNumber; + if (msgId) { + failures.push({ itemIdentifier: msgId }); + } + }); + + return failures; + } + + public collectKinesisFailures(): { [key: string]: string }[] { + const failures: { [key: string]: string }[] = []; + + this.failureMessages.forEach((msg) => { + const msgId = (msg as KinesisStreamRecord).kinesis.sequenceNumber; + failures.push({ itemIdentifier: msgId }); + }); + + return failures; + } + + public collectSqsFailures(): { [key: string]: string }[] { + const failures: { [key: string]: string }[] = []; + + this.failureMessages.forEach((msg) => { + const msgId = (msg as SQSRecord).messageId; + failures.push({ itemIdentifier: msgId }); + }); + + return failures; + } + + public entireBatchFailed(): boolean { + return this.exceptions.length == this.records.length; + } + + /** + * @returns formatted messages to use in batch deletion + */ + public getMessagesToReport(): { [key: string]: string }[] { + return this.COLLECTOR_MAPPING[this.eventType]; + } + + public hasMessagesToReport(): boolean { + if 
(this.failureMessages.length != 0) { + return true; + } + + console.debug( + 'All ' + this.successMessages.length + ' records successfully processed' + ); + + return false; + } + + /** + * Remove results from previous execution + */ + public prepare(): void { + this.successMessages = []; // TO-DO: see if this needs to be a reference delete + this.failureMessages = []; // If it does, use successMessages.length = 0 (less clarity) + this.exceptions = []; // But it clears the array reference, not just a new empty array + this.batchResponse = DEFAULT_RESPONSE; // need to find deep clone alternative here + } + + /** + * @returns Batch items that failed processing, if any + */ + public response(): { [key: string]: { [key: string]: string }[] } { + return this.batchResponse; + } + + public toBatchType( + record: BaseRecord, + eventType: EventType + ): EventSourceDataClassTypes { + if (eventType == EventType.SQS) { + return record as SQSRecordType; + } else if (eventType == EventType.KinesisDataStreams) { + return record as KinesisStreamRecordType; + } else if (eventType == EventType.DynamoDBStreams) { + return record as DynamoDBRecordType; + } else { + throw new Error('Invalid EventType provided'); + } + } +} + +export { BasePartialBatchProcessor }; diff --git a/packages/batch/src/BasePartialProcessor.ts b/packages/batch/src/BasePartialProcessor.ts new file mode 100644 index 0000000000..3ad98d399f --- /dev/null +++ b/packages/batch/src/BasePartialProcessor.ts @@ -0,0 +1,125 @@ +import { Context } from 'aws-lambda'; +import { + BaseRecord, + EventSourceDataClassTypes, + FailureResponse, + ResultType, + SuccessResponse, +} from '.'; + +/** + * Abstract class for batch processors + */ +abstract class BasePartialProcessor { + public exceptions: Error[]; + + public failureMessages: EventSourceDataClassTypes[]; + + public handler?: CallableFunction; + + public lambdaContext?: Context; + + public records: BaseRecord[]; + + public successMessages: EventSourceDataClassTypes[]; + + 
public constructor() { + this.successMessages = []; + this.failureMessages = []; + this.exceptions = []; + this.records = []; + } + + /** + * Set instance attributes before execution + * @param records List of records to be processed + * @param handler CallableFunction to process entries of "records" + * @param lambdaContext Optional parameter if lambda_context is to be injected + * @returns this object + */ + public call( + records: BaseRecord[], + handler: CallableFunction, + lambdaContext?: Context + ): BasePartialProcessor { + this.records = records; + this.handler = handler; + + if (lambdaContext != null) { + this.lambdaContext = lambdaContext; + } + + return this; + } + + public abstract clean(): void; + + public enter(): BasePartialProcessor { + this.prepare(); + + return this; + } + + public exit(): void { + this.clean(); + } + + /** + * Keeps track of batch records that failed processing + * @param record record that failed processing + * @param exception exception that was thrown + * @returns FailureResponse object with ["fail", exception, original record] + */ + public failure_handler( + record: EventSourceDataClassTypes, + exception: Error + ): FailureResponse { + const entry: FailureResponse = ['fail', exception.message, record]; + this.exceptions.push(exception); + this.failureMessages.push(record); + + return entry; + } + + public abstract prepare(): void; + + /** + * Call instance's handler for each record + * @returns List of processed records + */ + public async process(): Promise<(SuccessResponse | FailureResponse)[]> { + const processedRecords: (SuccessResponse | FailureResponse)[] = []; + + this.records.forEach((record) => { + processedRecords.push(this.processRecord(record)); + }); + + return processedRecords; + } + + /** + * Process a record with the handler + * @param record Record to be processed + */ + public abstract processRecord( + record: BaseRecord + ): SuccessResponse | FailureResponse; + + /** + * Keeps track of batch records that 
were processed successfully + * @param record record that succeeded processing + * @param result result from record handler + * @returns SuccessResponse object with ["success", result, original record] + */ + public success_handler( + record: EventSourceDataClassTypes, + result: ResultType + ): SuccessResponse { + const entry: SuccessResponse = ['success', result, record]; + this.successMessages.push(record); + + return entry; + } +} + +export { BasePartialProcessor }; diff --git a/packages/batch/src/constants.ts b/packages/batch/src/constants.ts new file mode 100644 index 0000000000..e525f78ea3 --- /dev/null +++ b/packages/batch/src/constants.ts @@ -0,0 +1,11 @@ +enum EventType { + SQS = 'SQS', + KinesisDataStreams = 'KinesisDataStreams', + DynamoDBStreams = 'DynamoDBStreams', +} + +const DEFAULT_RESPONSE: { [key: string]: { [key: string]: string }[] } = { + batchItemFailures: [], +}; + +export { EventType, DEFAULT_RESPONSE }; diff --git a/packages/batch/src/errors.ts b/packages/batch/src/errors.ts new file mode 100644 index 0000000000..dee8cb2de7 --- /dev/null +++ b/packages/batch/src/errors.ts @@ -0,0 +1,19 @@ +class BaseBatchProcessingError extends Error { + public childExceptions: Error[]; + + public msg: string; + + public constructor(msg: string, childExceptions: Error[]) { + super(); + this.msg = msg; + this.childExceptions = childExceptions; + } +} + +class BatchProcessingError extends BaseBatchProcessingError { + public constructor(msg: string, childExceptions: Error[]) { + super(msg, childExceptions); + } +} + +export { BaseBatchProcessingError, BatchProcessingError }; diff --git a/packages/batch/src/index.ts b/packages/batch/src/index.ts index 715736b461..1cf0a80728 100644 --- a/packages/batch/src/index.ts +++ b/packages/batch/src/index.ts @@ -1 +1,6 @@ +export * from './constants'; +export * from './errors'; +export * from './types'; +export * from './BasePartialProcessor'; +export * from './BasePartialBatchProcessor'; export * from './BatchProcessor'; 
diff --git a/packages/batch/src/types.ts b/packages/batch/src/types.ts new file mode 100644 index 0000000000..c825adba0f --- /dev/null +++ b/packages/batch/src/types.ts @@ -0,0 +1,31 @@ +import { DynamoDBRecord, KinesisStreamRecord, SQSRecord } from 'aws-lambda'; + +// types from base.py +type RecordValue = unknown; +type BaseRecord = { [key: string]: RecordValue }; + +type SQSRecordType = BaseRecord & SQSRecord; +type KinesisStreamRecordType = BaseRecord & KinesisStreamRecord; +type DynamoDBRecordType = BaseRecord & DynamoDBRecord; + +type EventSourceDataClassTypes = + | SQSRecordType + | KinesisStreamRecordType + | DynamoDBRecordType; + +type ResultType = unknown; +type SuccessResponse = [string, ResultType, EventSourceDataClassTypes]; + +type FailureResponse = [string, string, EventSourceDataClassTypes]; + +export type { + RecordValue, + BaseRecord, + SQSRecordType, + KinesisStreamRecordType, + DynamoDBRecordType, + EventSourceDataClassTypes, + ResultType, + SuccessResponse, + FailureResponse, +}; From fdcb59f9707cdfa35e1e1c1c79a54f1197ab7963 Mon Sep 17 00:00:00 2001 From: erikayao93 Date: Mon, 3 Jul 2023 23:05:03 +0000 Subject: [PATCH 04/24] Added BatchProcessor implementation, attempted fix for async --- packages/batch/src/BasePartialProcessor.ts | 25 ++++++-------------- packages/batch/src/BatchProcessor.ts | 27 +++++++++++++++++++++- 2 files changed, 33 insertions(+), 19 deletions(-) diff --git a/packages/batch/src/BasePartialProcessor.ts b/packages/batch/src/BasePartialProcessor.ts index 3ad98d399f..f868770853 100644 --- a/packages/batch/src/BasePartialProcessor.ts +++ b/packages/batch/src/BasePartialProcessor.ts @@ -15,7 +15,7 @@ abstract class BasePartialProcessor { public failureMessages: EventSourceDataClassTypes[]; - public handler?: CallableFunction; + public handler: CallableFunction = new Function(); public lambdaContext?: Context; @@ -54,23 +54,13 @@ abstract class BasePartialProcessor { public abstract clean(): void; - public enter(): 
BasePartialProcessor { - this.prepare(); - - return this; - } - - public exit(): void { - this.clean(); - } - /** * Keeps track of batch records that failed processing * @param record record that failed processing * @param exception exception that was thrown * @returns FailureResponse object with ["fail", exception, original record] */ - public failure_handler( + public failureHandler( record: EventSourceDataClassTypes, exception: Error ): FailureResponse { @@ -89,10 +79,9 @@ abstract class BasePartialProcessor { */ public async process(): Promise<(SuccessResponse | FailureResponse)[]> { const processedRecords: (SuccessResponse | FailureResponse)[] = []; - - this.records.forEach((record) => { - processedRecords.push(this.processRecord(record)); - }); + for (const record of this.records) { + processedRecords.push(await this.processRecord(record)); + } return processedRecords; } @@ -103,7 +92,7 @@ abstract class BasePartialProcessor { */ public abstract processRecord( record: BaseRecord - ): SuccessResponse | FailureResponse; + ): Promise; /** * Keeps track of batch records that were processed successfully @@ -111,7 +100,7 @@ abstract class BasePartialProcessor { * @param result result from record handler * @returns SuccessResponse object with ["success", result, original record] */ - public success_handler( + public successHandler( record: EventSourceDataClassTypes, result: ResultType ): SuccessResponse { diff --git a/packages/batch/src/BatchProcessor.ts b/packages/batch/src/BatchProcessor.ts index 11979d8db4..c5274b4147 100644 --- a/packages/batch/src/BatchProcessor.ts +++ b/packages/batch/src/BatchProcessor.ts @@ -1,3 +1,28 @@ -class BatchProcessor {} +import { + BasePartialBatchProcessor, + BaseRecord, + FailureResponse, + SuccessResponse, +} from '.'; + +class BatchProcessor extends BasePartialBatchProcessor { + public async processRecord( + record: BaseRecord + ): Promise { + try { + const data = this.toBatchType(record, this.eventType); + let result; + if 
(this.lambdaContext) { + result = await this.handler(data, this.lambdaContext); + } else { + result = await this.handler(data); + } + + return this.successHandler(record, result); + } catch (e) { + return this.failureHandler(record, e as Error); + } + } +} export { BatchProcessor }; From 8bb91d5f4c746427c42865985724cbbec72d757c Mon Sep 17 00:00:00 2001 From: erikayao93 Date: Thu, 6 Jul 2023 23:25:54 +0000 Subject: [PATCH 05/24] Added unit tests --- .../batch/src/BasePartialBatchProcessor.ts | 55 +-- packages/batch/src/BasePartialProcessor.ts | 45 +- packages/batch/src/BatchProcessor.ts | 7 +- packages/batch/src/errors.ts | 14 +- packages/batch/src/types.ts | 20 +- .../batch/tests/unit/BatchProcessor.test.ts | 457 +++++++++++++++++- 6 files changed, 517 insertions(+), 81 deletions(-) diff --git a/packages/batch/src/BasePartialBatchProcessor.ts b/packages/batch/src/BasePartialBatchProcessor.ts index 6f968b578b..175cb1013a 100644 --- a/packages/batch/src/BasePartialBatchProcessor.ts +++ b/packages/batch/src/BasePartialBatchProcessor.ts @@ -1,19 +1,17 @@ import { DynamoDBRecord, KinesisStreamRecord, SQSRecord } from 'aws-lambda'; import { BasePartialProcessor, - BaseRecord, BatchProcessingError, DEFAULT_RESPONSE, - DynamoDBRecordType, EventSourceDataClassTypes, EventType, - KinesisStreamRecordType, - SQSRecordType, } from '.'; abstract class BasePartialBatchProcessor extends BasePartialProcessor { public COLLECTOR_MAPPING; + public DATA_CLASS_MAPPING; + public batchResponse: { [key: string]: { [key: string]: string }[] }; public eventType: EventType; @@ -25,12 +23,17 @@ abstract class BasePartialBatchProcessor extends BasePartialProcessor { public constructor(eventType: EventType) { super(); this.eventType = eventType; - this.batchResponse = DEFAULT_RESPONSE; // need to find deep clone alternative here + this.batchResponse = DEFAULT_RESPONSE; this.COLLECTOR_MAPPING = { - [EventType.SQS]: this.collectSqsFailures(), - [EventType.KinesisDataStreams]: 
this.collectKinesisFailures(), - [EventType.DynamoDBStreams]: this.collectDynamoDBFailures(), + [EventType.SQS]: () => this.collectSqsFailures(), + [EventType.KinesisDataStreams]: () => this.collectKinesisFailures(), + [EventType.DynamoDBStreams]: () => this.collectDynamoDBFailures(), }; + this.DATA_CLASS_MAPPING = { + [EventType.SQS]: (record: EventSourceDataClassTypes) => record as SQSRecord, + [EventType.KinesisDataStreams]: (record: EventSourceDataClassTypes) => record as KinesisStreamRecord, + [EventType.DynamoDBStreams]: (record: EventSourceDataClassTypes) => record as DynamoDBRecord, + } } /** @@ -57,12 +60,12 @@ abstract class BasePartialBatchProcessor extends BasePartialProcessor { public collectDynamoDBFailures(): { [key: string]: string }[] { const failures: { [key: string]: string }[] = []; - this.failureMessages.forEach((msg) => { + for (const msg of this.failureMessages) { const msgId = (msg as DynamoDBRecord).dynamodb?.SequenceNumber; if (msgId) { failures.push({ itemIdentifier: msgId }); } - }); + } return failures; } @@ -70,10 +73,10 @@ abstract class BasePartialBatchProcessor extends BasePartialProcessor { public collectKinesisFailures(): { [key: string]: string }[] { const failures: { [key: string]: string }[] = []; - this.failureMessages.forEach((msg) => { + for (const msg of this.failureMessages) { const msgId = (msg as KinesisStreamRecord).kinesis.sequenceNumber; failures.push({ itemIdentifier: msgId }); - }); + } return failures; } @@ -81,10 +84,10 @@ abstract class BasePartialBatchProcessor extends BasePartialProcessor { public collectSqsFailures(): { [key: string]: string }[] { const failures: { [key: string]: string }[] = []; - this.failureMessages.forEach((msg) => { + for (const msg of this.failureMessages) { const msgId = (msg as SQSRecord).messageId; failures.push({ itemIdentifier: msgId }); - }); + } return failures; } @@ -97,7 +100,7 @@ abstract class BasePartialBatchProcessor extends BasePartialProcessor { * @returns formatted 
messages to use in batch deletion */ public getMessagesToReport(): { [key: string]: string }[] { - return this.COLLECTOR_MAPPING[this.eventType]; + return this.COLLECTOR_MAPPING[this.eventType](); } public hasMessagesToReport(): boolean { @@ -116,10 +119,10 @@ abstract class BasePartialBatchProcessor extends BasePartialProcessor { * Remove results from previous execution */ public prepare(): void { - this.successMessages = []; // TO-DO: see if this needs to be a reference delete - this.failureMessages = []; // If it does, use successMessages.length = 0 (less clarity) - this.exceptions = []; // But it clears the array reference, not just a new empty array - this.batchResponse = DEFAULT_RESPONSE; // need to find deep clone alternative here + this.successMessages.length = 0; + this.failureMessages.length = 0; + this.exceptions.length = 0; + this.batchResponse = DEFAULT_RESPONSE; } /** @@ -130,18 +133,10 @@ abstract class BasePartialBatchProcessor extends BasePartialProcessor { } public toBatchType( - record: BaseRecord, + record: EventSourceDataClassTypes, eventType: EventType - ): EventSourceDataClassTypes { - if (eventType == EventType.SQS) { - return record as SQSRecordType; - } else if (eventType == EventType.KinesisDataStreams) { - return record as KinesisStreamRecordType; - } else if (eventType == EventType.DynamoDBStreams) { - return record as DynamoDBRecordType; - } else { - throw new Error('Invalid EventType provided'); - } + ): SQSRecord | KinesisStreamRecord | DynamoDBRecord { + return this.DATA_CLASS_MAPPING[eventType](record); } } diff --git a/packages/batch/src/BasePartialProcessor.ts b/packages/batch/src/BasePartialProcessor.ts index f868770853..2a158fbc3b 100644 --- a/packages/batch/src/BasePartialProcessor.ts +++ b/packages/batch/src/BasePartialProcessor.ts @@ -1,4 +1,3 @@ -import { Context } from 'aws-lambda'; import { BaseRecord, EventSourceDataClassTypes, @@ -17,8 +16,6 @@ abstract class BasePartialProcessor { public handler: CallableFunction = new 
Function(); - public lambdaContext?: Context; - public records: BaseRecord[]; public successMessages: EventSourceDataClassTypes[]; @@ -30,28 +27,6 @@ abstract class BasePartialProcessor { this.records = []; } - /** - * Set instance attributes before execution - * @param records List of records to be processed - * @param handler CallableFunction to process entries of "records" - * @param lambdaContext Optional parameter if lambda_context is to be injected - * @returns this object - */ - public call( - records: BaseRecord[], - handler: CallableFunction, - lambdaContext?: Context - ): BasePartialProcessor { - this.records = records; - this.handler = handler; - - if (lambdaContext != null) { - this.lambdaContext = lambdaContext; - } - - return this; - } - public abstract clean(): void; /** @@ -65,6 +40,7 @@ abstract class BasePartialProcessor { exception: Error ): FailureResponse { const entry: FailureResponse = ['fail', exception.message, record]; + console.debug("Record processing exception: " + exception.message); this.exceptions.push(exception); this.failureMessages.push(record); @@ -78,11 +54,14 @@ abstract class BasePartialProcessor { * @returns List of processed records */ public async process(): Promise<(SuccessResponse | FailureResponse)[]> { + this.prepare(); + const processedRecords: (SuccessResponse | FailureResponse)[] = []; for (const record of this.records) { processedRecords.push(await this.processRecord(record)); } + this.clean(); return processedRecords; } @@ -94,6 +73,22 @@ abstract class BasePartialProcessor { record: BaseRecord ): Promise; + /** + * Set instance attributes before execution + * @param records List of records to be processed + * @param handler CallableFunction to process entries of "records" + * @returns this object + */ + public register( + records: BaseRecord[], + handler: CallableFunction, + ): BasePartialProcessor { + this.records = records; + this.handler = handler; + + return this; + } + /** * Keeps track of batch records that 
were processed successfully * @param record record that succeeded processing diff --git a/packages/batch/src/BatchProcessor.ts b/packages/batch/src/BatchProcessor.ts index c5274b4147..38bc5adbcb 100644 --- a/packages/batch/src/BatchProcessor.ts +++ b/packages/batch/src/BatchProcessor.ts @@ -11,12 +11,7 @@ class BatchProcessor extends BasePartialBatchProcessor { ): Promise { try { const data = this.toBatchType(record, this.eventType); - let result; - if (this.lambdaContext) { - result = await this.handler(data, this.lambdaContext); - } else { - result = await this.handler(data); - } + let result = await this.handler(data); return this.successHandler(record, result); } catch (e) { diff --git a/packages/batch/src/errors.ts b/packages/batch/src/errors.ts index dee8cb2de7..747d54939a 100644 --- a/packages/batch/src/errors.ts +++ b/packages/batch/src/errors.ts @@ -4,15 +4,27 @@ class BaseBatchProcessingError extends Error { public msg: string; public constructor(msg: string, childExceptions: Error[]) { - super(); + super(msg); this.msg = msg; this.childExceptions = childExceptions; } + + public formatExceptions(parentExceptionString: string): string { + let exceptionList: string[] = [parentExceptionString + "\n"]; + + for (const exception of this.childExceptions) { + exceptionList.push(exception.message); + } + + return "\n" + exceptionList; + } } class BatchProcessingError extends BaseBatchProcessingError { public constructor(msg: string, childExceptions: Error[]) { super(msg, childExceptions); + let parentExceptionString: string = this.message; + this.message = this.formatExceptions(parentExceptionString) } } diff --git a/packages/batch/src/types.ts b/packages/batch/src/types.ts index c825adba0f..775c60cc71 100644 --- a/packages/batch/src/types.ts +++ b/packages/batch/src/types.ts @@ -1,17 +1,13 @@ import { DynamoDBRecord, KinesisStreamRecord, SQSRecord } from 'aws-lambda'; // types from base.py -type RecordValue = unknown; -type BaseRecord = { [key: string]: 
RecordValue }; - -type SQSRecordType = BaseRecord & SQSRecord; -type KinesisStreamRecordType = BaseRecord & KinesisStreamRecord; -type DynamoDBRecordType = BaseRecord & DynamoDBRecord; - type EventSourceDataClassTypes = - | SQSRecordType - | KinesisStreamRecordType - | DynamoDBRecordType; + | SQSRecord + | KinesisStreamRecord + | DynamoDBRecord; + +type RecordValue = unknown; +type BaseRecord = { [key: string]: RecordValue } | EventSourceDataClassTypes; type ResultType = unknown; type SuccessResponse = [string, ResultType, EventSourceDataClassTypes]; @@ -19,11 +15,7 @@ type SuccessResponse = [string, ResultType, EventSourceDataClassTypes]; type FailureResponse = [string, string, EventSourceDataClassTypes]; export type { - RecordValue, BaseRecord, - SQSRecordType, - KinesisStreamRecordType, - DynamoDBRecordType, EventSourceDataClassTypes, ResultType, SuccessResponse, diff --git a/packages/batch/tests/unit/BatchProcessor.test.ts b/packages/batch/tests/unit/BatchProcessor.test.ts index f411976325..1caec1bbbd 100644 --- a/packages/batch/tests/unit/BatchProcessor.test.ts +++ b/packages/batch/tests/unit/BatchProcessor.test.ts @@ -3,9 +3,123 @@ * * @group unit/batch/class/batchprocessor */ -import { BatchProcessor } from '../../src'; +import { DynamoDBRecord, KinesisStreamRecord, SQSRecord } from 'aws-lambda'; +import { BatchProcessingError, BatchProcessor, EventType } from '../../src'; +import { v4 } from 'uuid'; -describe('Class: IdempotencyConfig', () => { +const sqsEventFactory = (body: string): SQSRecord => { + return { + "messageId": v4(), + "receiptHandle": "AQEBwJnKyrHigUMZj6rYigCgxlaS3SLy0a", + "body": body, + "attributes": { + "ApproximateReceiveCount": "1", + "SentTimestamp": "1545082649183", + "SenderId": "AIDAIENQZJOLO23YVJ4VO", + "ApproximateFirstReceiveTimestamp": "1545082649185", + }, + "messageAttributes": {}, + "md5OfBody": "e4e68fb7bd0e697a0ae8f1bb342846b3", + "eventSource": "aws:sqs", + "eventSourceARN": "arn:aws:sqs:us-east-2:123456789012:my-queue", + 
"awsRegion": "us-east-1", + } +}; + +const kinesisEventFactory = (body: string): KinesisStreamRecord => { + let seq: string = ""; + for (var i = 0; i < 52; i++) { + seq = seq + Math.floor(Math.random() * 10); + } + return { + "kinesis": { + "kinesisSchemaVersion": "1.0", + "partitionKey": "1", + "sequenceNumber": seq, + "data": body, + "approximateArrivalTimestamp": 1545084650.987, + }, + "eventSource": "aws:kinesis", + "eventVersion": "1.0", + "eventID": "shardId-000000000006:" + seq, + "eventName": "aws:kinesis:record", + "invokeIdentityArn": "arn:aws:iam::123456789012:role/lambda-role", + "awsRegion": "us-east-2", + "eventSourceARN": "arn:aws:kinesis:us-east-2:123456789012:stream/lambda-stream", + } +}; + +const dynamodbEventFactory = (body: string): DynamoDBRecord => { + let seq: string = ""; + for (var i = 0; i < 10; i++) { + seq = seq + Math.floor(Math.random() * 10); + } + return { + "eventID": "1", + "eventVersion": "1.0", + "dynamodb": { + "Keys": {"Id": {"N": "101"}}, + "NewImage": {"Message": {"S": body}}, + "StreamViewType": "NEW_AND_OLD_IMAGES", + "SequenceNumber": seq, + "SizeBytes": 26, + }, + "awsRegion": "us-west-2", + "eventName": "INSERT", + "eventSourceARN": "eventsource_arn", + "eventSource": "aws:dynamodb", + } +}; + +const sqsRecordHandler = (record: SQSRecord): Object => { + let body = record.body; + if (body.includes("fail")) { + throw Error("Failed to process record."); + } + return body; +}; + +const asyncSqsRecordHandler = async (record: SQSRecord): Promise => { + let body = record.body; + if (body.includes("fail")) { + throw Error("Failed to process record."); + } + return body; +}; + +const kinesisRecordHandler = (record: KinesisStreamRecord): Object => { + let body = record.kinesis.data; + if (body.includes("fail")) { + throw Error("Failed to process record."); + } + return body; +}; + +const asyncKinesisRecordHandler = async (record: KinesisStreamRecord): Promise => { + let body = record.kinesis.data; + if (body.includes("fail")) { + 
throw Error("Failed to process record."); + } + return body; +}; + +const dynamodbRecordHandler = (record: DynamoDBRecord): Object => { + let body = record.dynamodb?.NewImage?.Message || {"S": "fail"}; + if (body["S"]?.includes("fail")) { + throw Error("Failed to process record."); + } + return body; +}; + +const asyncDynamodbRecordHandler = async (record: DynamoDBRecord): Promise => { + let body = await record.dynamodb?.NewImage?.Message || {"S": "fail"}; + if (body["S"]?.includes("fail")) { + throw Error("Failed to process record."); + } + return body; +}; + +describe('Class: BatchProcessor', () => { const ENVIRONMENT_VARIABLES = process.env; beforeEach(() => { @@ -18,9 +132,342 @@ describe('Class: IdempotencyConfig', () => { process.env = ENVIRONMENT_VARIABLES; }); - describe('remove me', () => { - test('does stuff', () => { - expect(BatchProcessor).toBeDefined(); + describe('Synchronously processing SQS Records', () => { + test('Batch processing SQS records with no failures', async () => { + // Prepare + let firstRecord = sqsEventFactory("success"); + let secondRecord = sqsEventFactory("success"); + let records = [firstRecord, secondRecord]; + let processor = new BatchProcessor(EventType.SQS); + + // Act + processor.register(records, sqsRecordHandler); + let processedMessages = await processor.process(); + + // Assess + expect(processedMessages).toStrictEqual([["success", (firstRecord as SQSRecord).body, firstRecord], + ["success", (secondRecord as SQSRecord).body, secondRecord]]); + }); + + test('Batch processing SQS records with some failures', async () => { + // Prepare + let firstRecord = sqsEventFactory("failure"); + let secondRecord = sqsEventFactory("success"); + let thirdRecord = sqsEventFactory("fail"); + let records = [firstRecord, secondRecord, thirdRecord]; + let processor = new BatchProcessor(EventType.SQS); + + // Act + processor.register(records, sqsRecordHandler); + let processedMessages = await processor.process(); + + // Asses + 
expect(processedMessages[1]).toStrictEqual(["success", (secondRecord as SQSRecord).body, secondRecord]); + expect(processor.failureMessages.length).toBe(2); + expect(processor.response()).toStrictEqual( + {"batchItemFailures": + [{"itemIdentifier": (firstRecord as SQSRecord).messageId}, + {"itemIdentifier": (thirdRecord as SQSRecord).messageId}] + }, + ); + }); + + test('Batch processing SQS records with all failures', async () => { + // Prepare + let firstRecord = sqsEventFactory("failure"); + let secondRecord = sqsEventFactory("failure"); + let thirdRecord = sqsEventFactory("fail"); + + let records = [firstRecord, secondRecord, thirdRecord]; + let processor = new BatchProcessor(EventType.SQS); + + // Act & Assess + processor.register(records, sqsRecordHandler); + await expect(processor.process()).rejects.toThrowError(BatchProcessingError); + }); + }); + + describe('Asynchronously processing SQS Records', () => { + test('Batch processing SQS records with no failures', async () => { + // Prepare + let firstRecord = sqsEventFactory("success"); + let secondRecord = sqsEventFactory("success"); + let records = [firstRecord, secondRecord]; + let processor = new BatchProcessor(EventType.SQS); + + // Act + processor.register(records, asyncSqsRecordHandler); + let processedMessages = await processor.process(); + + // Assess + expect(processedMessages).toStrictEqual([["success", (firstRecord as SQSRecord).body, firstRecord], + ["success", (secondRecord as SQSRecord).body, secondRecord]]); + }); + + test('Batch processing SQS records with failures', async () => { + // Prepare + let firstRecord = sqsEventFactory("failure"); + let secondRecord = sqsEventFactory("success"); + let thirdRecord = sqsEventFactory("fail"); + let records = [firstRecord, secondRecord, thirdRecord]; + let processor = new BatchProcessor(EventType.SQS); + + // Act + processor.register(records, asyncSqsRecordHandler); + let processedMessages = await processor.process(); + + // Assess + 
expect(processedMessages[1]).toStrictEqual(["success", (secondRecord as SQSRecord).body, secondRecord]); + expect(processor.failureMessages.length).toBe(2); + expect(processor.response()).toStrictEqual( + {"batchItemFailures": + [{"itemIdentifier": (firstRecord as SQSRecord).messageId}, + {"itemIdentifier": (thirdRecord as SQSRecord).messageId}] + } + ); + }); + + test('Batch processing SQS records with all failures', async () => { + // Prepare + let firstRecord = sqsEventFactory("failure"); + let secondRecord = sqsEventFactory("failure"); + let thirdRecord = sqsEventFactory("fail"); + + let records = [firstRecord, secondRecord, thirdRecord]; + let processor = new BatchProcessor(EventType.SQS); + + // Act + processor.register(records, asyncSqsRecordHandler); + + // Assess + await expect(processor.process()).rejects.toThrowError(BatchProcessingError); + }); + }); + + describe('Synchronously processing Kinesis Records', () => { + test('Batch processing Kinesis records with no failures', async () => { + // Prepare + let firstRecord = kinesisEventFactory("success"); + let secondRecord = kinesisEventFactory("success"); + let records = [firstRecord, secondRecord]; + let processor = new BatchProcessor(EventType.KinesisDataStreams); + + // Act + processor.register(records, kinesisRecordHandler); + let processedMessages = await processor.process(); + + // Assess + expect(processedMessages).toStrictEqual([["success", (firstRecord as KinesisStreamRecord).kinesis.data, firstRecord], + ["success", (secondRecord as KinesisStreamRecord).kinesis.data, secondRecord]]); + }); + + test('Batch processing Kinesis records with failures', async () => { + // Prepare + let firstRecord = kinesisEventFactory("failure"); + let secondRecord = kinesisEventFactory("success"); + let thirdRecord = kinesisEventFactory("fail"); + let records = [firstRecord, secondRecord, thirdRecord]; + let processor = new BatchProcessor(EventType.KinesisDataStreams); + + // Act + processor.register(records, 
kinesisRecordHandler); + let processedMessages = await processor.process(); + + // Assess + expect(processedMessages[1]).toStrictEqual(["success", (secondRecord as KinesisStreamRecord).kinesis.data, secondRecord]); + expect(processor.failureMessages.length).toBe(2); + expect(processor.response()).toStrictEqual( + {"batchItemFailures": + [{"itemIdentifier": (firstRecord as KinesisStreamRecord).kinesis.sequenceNumber}, + {"itemIdentifier": (thirdRecord as KinesisStreamRecord).kinesis.sequenceNumber}] + } + ); + }); + + test('Batch processing Kinesis records with all failures', async () => { + let firstRecord = kinesisEventFactory("failure"); + let secondRecord = kinesisEventFactory("failure"); + let thirdRecord = kinesisEventFactory("fail"); + + let records = [firstRecord, secondRecord, thirdRecord]; + let processor = new BatchProcessor(EventType.KinesisDataStreams); + + // Act + processor.register(records, kinesisRecordHandler); + + // Assess + await expect(processor.process()).rejects.toThrowError(BatchProcessingError); + }); + }); + + describe('Asynchronously processing Kinesis Records', () => { + test('Batch processing Kinesis records with no failures', async () => { + // Prepare + let firstRecord = kinesisEventFactory("success"); + let secondRecord = kinesisEventFactory("success"); + let records = [firstRecord, secondRecord]; + let processor = new BatchProcessor(EventType.KinesisDataStreams); + + // Act + processor.register(records, asyncKinesisRecordHandler); + let processedMessages = await processor.process(); + + // Assess + expect(processedMessages).toStrictEqual([["success", (firstRecord as KinesisStreamRecord).kinesis.data, firstRecord], + ["success", (secondRecord as KinesisStreamRecord).kinesis.data, secondRecord]]); + }); + + test('Batch processing Kinesis records with failures', async () => { + // Prepare + let firstRecord = kinesisEventFactory("failure"); + let secondRecord = kinesisEventFactory("success"); + let thirdRecord = 
kinesisEventFactory("fail"); + let records = [firstRecord, secondRecord, thirdRecord]; + let processor = new BatchProcessor(EventType.KinesisDataStreams); + + // Act + processor.register(records, asyncKinesisRecordHandler); + let processedMessages = await processor.process(); + + // Assess + expect(processedMessages[1]).toStrictEqual(["success", (secondRecord as KinesisStreamRecord).kinesis.data, secondRecord]); + expect(processor.failureMessages.length).toBe(2); + expect(processor.response()).toStrictEqual( + {"batchItemFailures": + [{"itemIdentifier": (firstRecord as KinesisStreamRecord).kinesis.sequenceNumber}, + {"itemIdentifier": (thirdRecord as KinesisStreamRecord).kinesis.sequenceNumber}] + } + ); + }); + + test('Batch processing Kinesis records with all failures', async () => { + // Prepare + let firstRecord = kinesisEventFactory("failure"); + let secondRecord = kinesisEventFactory("failure"); + let thirdRecord = kinesisEventFactory("fail"); + + let records = [firstRecord, secondRecord, thirdRecord]; + let processor = new BatchProcessor(EventType.KinesisDataStreams); + + // Act + processor.register(records, asyncKinesisRecordHandler); + + // Assess + await expect(processor.process()).rejects.toThrowError(BatchProcessingError); + }); + }); + + describe('Synchronously processing DynamoDB Records', () => { + test('Batch processing DynamoDB records with no failures', async () => { + // Prepare + let firstRecord = dynamodbEventFactory("success"); + let secondRecord = dynamodbEventFactory("success"); + let records = [firstRecord, secondRecord]; + let processor = new BatchProcessor(EventType.DynamoDBStreams); + + // Act + processor.register(records, dynamodbRecordHandler); + let processedMessages = await processor.process(); + + // Assess + expect(processedMessages).toStrictEqual([["success", (firstRecord as DynamoDBRecord).dynamodb?.NewImage?.Message, firstRecord], + ["success", (secondRecord as DynamoDBRecord).dynamodb?.NewImage?.Message, secondRecord]]); + }); 
+ + test('Batch processing DynamoDB records with failures', async () => { + // Prepare + let firstRecord = dynamodbEventFactory("failure"); + let secondRecord = dynamodbEventFactory("success"); + let thirdRecord = dynamodbEventFactory("fail"); + let records = [firstRecord, secondRecord, thirdRecord]; + let processor = new BatchProcessor(EventType.DynamoDBStreams); + + // Act + processor.register(records, dynamodbRecordHandler); + let processedMessages = await processor.process(); + + // Assess + expect(processedMessages[1]).toStrictEqual(["success", (secondRecord as DynamoDBRecord).dynamodb?.NewImage?.Message, secondRecord]); + expect(processor.failureMessages.length).toBe(2); + expect(processor.response()).toStrictEqual( + {"batchItemFailures": + [{"itemIdentifier": (firstRecord as DynamoDBRecord).dynamodb?.SequenceNumber}, + {"itemIdentifier": (thirdRecord as DynamoDBRecord).dynamodb?.SequenceNumber}] + } + ); + }); + + test('Batch processing DynamoDB records with all failures', async () => { + // Prepare + let firstRecord = dynamodbEventFactory("failure"); + let secondRecord = dynamodbEventFactory("failure"); + let thirdRecord = dynamodbEventFactory("fail"); + + let records = [firstRecord, secondRecord, thirdRecord]; + let processor = new BatchProcessor(EventType.DynamoDBStreams); + + // Act + processor.register(records, dynamodbRecordHandler); + + // Assess + await expect(processor.process()).rejects.toThrowError(BatchProcessingError); + }); + }); + + describe('Asynchronously processing DynamoDB Records', () => { + test('Batch processing DynamoDB records with no failures', async () => { + // Prepare + let firstRecord = dynamodbEventFactory("success"); + let secondRecord = dynamodbEventFactory("success"); + let records = [firstRecord, secondRecord]; + let processor = new BatchProcessor(EventType.DynamoDBStreams); + + // Act + processor.register(records, asyncDynamodbRecordHandler); + let processedMessages = await processor.process(); + + // Assess + 
expect(processedMessages).toStrictEqual([["success", (firstRecord as DynamoDBRecord).dynamodb?.NewImage?.Message, firstRecord], + ["success", (secondRecord as DynamoDBRecord).dynamodb?.NewImage?.Message, secondRecord]]); + }); + + test('Batch processing DynamoDB records with failures', async () => { + // Prepare + let firstRecord = dynamodbEventFactory("failure"); + let secondRecord = dynamodbEventFactory("success"); + let thirdRecord = dynamodbEventFactory("fail"); + let records = [firstRecord, secondRecord, thirdRecord]; + let processor = new BatchProcessor(EventType.DynamoDBStreams); + + // Act + processor.register(records, asyncDynamodbRecordHandler); + let processedMessages = await processor.process(); + + // Assess + expect(processedMessages[1]).toStrictEqual(["success", (secondRecord as DynamoDBRecord).dynamodb?.NewImage?.Message, secondRecord]); + expect(processor.failureMessages.length).toBe(2); + expect(processor.response()).toStrictEqual( + {"batchItemFailures": + [{"itemIdentifier": (firstRecord as DynamoDBRecord).dynamodb?.SequenceNumber}, + {"itemIdentifier": (thirdRecord as DynamoDBRecord).dynamodb?.SequenceNumber}] + } + ); + }); + + test('Batch processing DynamoDB records with all failures', async () => { + // Prepare + let firstRecord = dynamodbEventFactory("failure"); + let secondRecord = dynamodbEventFactory("failure"); + let thirdRecord = dynamodbEventFactory("fail"); + + let records = [firstRecord, secondRecord, thirdRecord]; + let processor = new BatchProcessor(EventType.DynamoDBStreams); + + // Act + processor.register(records, asyncDynamodbRecordHandler); + + // Assess + await expect(processor.process()).rejects.toThrowError(BatchProcessingError); }); }); }); From 8767d6a601caa709dc5461c11e9455d0157c75ea Mon Sep 17 00:00:00 2001 From: erikayao93 Date: Thu, 6 Jul 2023 23:42:01 +0000 Subject: [PATCH 06/24] Refactoring unit tests --- packages/batch/tests/helpers/factories.ts | 66 +++++++ packages/batch/tests/helpers/handlers.ts | 49 +++++ 
.../batch/tests/unit/BatchProcessor.test.ts | 169 +++++------------- 3 files changed, 157 insertions(+), 127 deletions(-) create mode 100644 packages/batch/tests/helpers/factories.ts create mode 100644 packages/batch/tests/helpers/handlers.ts diff --git a/packages/batch/tests/helpers/factories.ts b/packages/batch/tests/helpers/factories.ts new file mode 100644 index 0000000000..de72110d9c --- /dev/null +++ b/packages/batch/tests/helpers/factories.ts @@ -0,0 +1,66 @@ +import { DynamoDBRecord, KinesisStreamRecord, SQSRecord } from 'aws-lambda'; +import { v4 } from 'uuid'; + +export const sqsEventFactory = (body: string): SQSRecord => { + return { + "messageId": v4(), + "receiptHandle": "AQEBwJnKyrHigUMZj6rYigCgxlaS3SLy0a", + "body": body, + "attributes": { + "ApproximateReceiveCount": "1", + "SentTimestamp": "1545082649183", + "SenderId": "AIDAIENQZJOLO23YVJ4VO", + "ApproximateFirstReceiveTimestamp": "1545082649185", + }, + "messageAttributes": {}, + "md5OfBody": "e4e68fb7bd0e697a0ae8f1bb342846b3", + "eventSource": "aws:sqs", + "eventSourceARN": "arn:aws:sqs:us-east-2:123456789012:my-queue", + "awsRegion": "us-east-1", + } +}; + +export const kinesisEventFactory = (body: string): KinesisStreamRecord => { + let seq: string = ""; + for (var i = 0; i < 52; i++) { + seq = seq + Math.floor(Math.random() * 10); + } + return { + "kinesis": { + "kinesisSchemaVersion": "1.0", + "partitionKey": "1", + "sequenceNumber": seq, + "data": body, + "approximateArrivalTimestamp": 1545084650.987, + }, + "eventSource": "aws:kinesis", + "eventVersion": "1.0", + "eventID": "shardId-000000000006:" + seq, + "eventName": "aws:kinesis:record", + "invokeIdentityArn": "arn:aws:iam::123456789012:role/lambda-role", + "awsRegion": "us-east-2", + "eventSourceARN": "arn:aws:kinesis:us-east-2:123456789012:stream/lambda-stream", + } +}; + +export const dynamodbEventFactory = (body: string): DynamoDBRecord => { + let seq: string = ""; + for (var i = 0; i < 10; i++) { + seq = seq + 
Math.floor(Math.random() * 10); + } + return { + "eventID": "1", + "eventVersion": "1.0", + "dynamodb": { + "Keys": {"Id": {"N": "101"}}, + "NewImage": {"Message": {"S": body}}, + "StreamViewType": "NEW_AND_OLD_IMAGES", + "SequenceNumber": seq, + "SizeBytes": 26, + }, + "awsRegion": "us-west-2", + "eventName": "INSERT", + "eventSourceARN": "eventsource_arn", + "eventSource": "aws:dynamodb", + } +}; \ No newline at end of file diff --git a/packages/batch/tests/helpers/handlers.ts b/packages/batch/tests/helpers/handlers.ts new file mode 100644 index 0000000000..3c73d1a85b --- /dev/null +++ b/packages/batch/tests/helpers/handlers.ts @@ -0,0 +1,49 @@ +import { DynamoDBRecord, KinesisStreamRecord, SQSRecord } from 'aws-lambda'; + +export const sqsRecordHandler = (record: SQSRecord): string => { + let body = record.body; + if (body.includes("fail")) { + throw Error("Failed to process record."); + } + return body; +}; + +export const asyncSqsRecordHandler = async (record: SQSRecord): Promise => { + let body = record.body; + if (body.includes("fail")) { + throw Error("Failed to process record."); + } + return body; +}; + +export const kinesisRecordHandler = (record: KinesisStreamRecord): string => { + let body = record.kinesis.data; + if (body.includes("fail")) { + throw Error("Failed to process record."); + } + return body; +}; + +export const asyncKinesisRecordHandler = async (record: KinesisStreamRecord): Promise => { + let body = record.kinesis.data; + if (body.includes("fail")) { + throw Error("Failed to process record."); + } + return body; +}; + +export const dynamodbRecordHandler = (record: DynamoDBRecord): object => { + let body = record.dynamodb?.NewImage?.Message || {"S": "fail"}; + if (body["S"]?.includes("fail")) { + throw Error("Failed to process record."); + } + return body; +}; + +export const asyncDynamodbRecordHandler = async (record: DynamoDBRecord): Promise => { + let body = await record.dynamodb?.NewImage?.Message || {"S": "fail"}; + if 
(body["S"]?.includes("fail")) { + throw Error("Failed to process record."); + } + return body; +}; \ No newline at end of file diff --git a/packages/batch/tests/unit/BatchProcessor.test.ts b/packages/batch/tests/unit/BatchProcessor.test.ts index 1caec1bbbd..06787f701d 100644 --- a/packages/batch/tests/unit/BatchProcessor.test.ts +++ b/packages/batch/tests/unit/BatchProcessor.test.ts @@ -3,121 +3,24 @@ * * @group unit/batch/class/batchprocessor */ -import { DynamoDBRecord, KinesisStreamRecord, SQSRecord } from 'aws-lambda'; import { BatchProcessingError, BatchProcessor, EventType } from '../../src'; -import { v4 } from 'uuid'; - -const sqsEventFactory = (body: string): SQSRecord => { - return { - "messageId": v4(), - "receiptHandle": "AQEBwJnKyrHigUMZj6rYigCgxlaS3SLy0a", - "body": body, - "attributes": { - "ApproximateReceiveCount": "1", - "SentTimestamp": "1545082649183", - "SenderId": "AIDAIENQZJOLO23YVJ4VO", - "ApproximateFirstReceiveTimestamp": "1545082649185", - }, - "messageAttributes": {}, - "md5OfBody": "e4e68fb7bd0e697a0ae8f1bb342846b3", - "eventSource": "aws:sqs", - "eventSourceARN": "arn:aws:sqs:us-east-2:123456789012:my-queue", - "awsRegion": "us-east-1", - } -}; - -const kinesisEventFactory = (body: string): KinesisStreamRecord => { - let seq: string = ""; - for (var i = 0; i < 52; i++) { - seq = seq + Math.floor(Math.random() * 10); - } - return { - "kinesis": { - "kinesisSchemaVersion": "1.0", - "partitionKey": "1", - "sequenceNumber": seq, - "data": body, - "approximateArrivalTimestamp": 1545084650.987, - }, - "eventSource": "aws:kinesis", - "eventVersion": "1.0", - "eventID": "shardId-000000000006:" + seq, - "eventName": "aws:kinesis:record", - "invokeIdentityArn": "arn:aws:iam::123456789012:role/lambda-role", - "awsRegion": "us-east-2", - "eventSourceARN": "arn:aws:kinesis:us-east-2:123456789012:stream/lambda-stream", - } -}; - -const dynamodbEventFactory = (body: string): DynamoDBRecord => { - let seq: string = ""; - for (var i = 0; i < 10; i++) { 
- seq = seq + Math.floor(Math.random() * 10); - } - return { - "eventID": "1", - "eventVersion": "1.0", - "dynamodb": { - "Keys": {"Id": {"N": "101"}}, - "NewImage": {"Message": {"S": body}}, - "StreamViewType": "NEW_AND_OLD_IMAGES", - "SequenceNumber": seq, - "SizeBytes": 26, - }, - "awsRegion": "us-west-2", - "eventName": "INSERT", - "eventSourceARN": "eventsource_arn", - "eventSource": "aws:dynamodb", - } -}; - -const sqsRecordHandler = (record: SQSRecord): Object => { - let body = record.body; - if (body.includes("fail")) { - throw Error("Failed to process record."); - } - return body; -}; - -const asyncSqsRecordHandler = async (record: SQSRecord): Promise => { - let body = record.body; - if (body.includes("fail")) { - throw Error("Failed to process record."); - } - return body; -}; - -const kinesisRecordHandler = (record: KinesisStreamRecord): Object => { - let body = record.kinesis.data; - if (body.includes("fail")) { - throw Error("Failed to process record."); - } - return body; -}; - -const asyncKinesisRecordHandler = async (record: KinesisStreamRecord): Promise => { - let body = record.kinesis.data; - if (body.includes("fail")) { - throw Error("Failed to process record."); - } - return body; -}; - -const dynamodbRecordHandler = (record: DynamoDBRecord): Object => { - let body = record.dynamodb?.NewImage?.Message || {"S": "fail"}; - if (body["S"]?.includes("fail")) { - throw Error("Failed to process record."); - } - return body; -}; - -const asyncDynamodbRecordHandler = async (record: DynamoDBRecord): Promise => { - let body = await record.dynamodb?.NewImage?.Message || {"S": "fail"}; - if (body["S"]?.includes("fail")) { - throw Error("Failed to process record."); - } - return body; -}; +import { DynamoDBRecord, + KinesisStreamRecord, + SQSRecord +} from 'aws-lambda'; +import { + sqsEventFactory, + kinesisEventFactory, + dynamodbEventFactory +} from '../../tests/helpers/factories'; +import { + sqsRecordHandler, + asyncSqsRecordHandler, + 
kinesisRecordHandler, + asyncKinesisRecordHandler, + dynamodbRecordHandler, + asyncDynamodbRecordHandler +} from '../../tests/helpers/handlers'; describe('Class: BatchProcessor', () => { const ENVIRONMENT_VARIABLES = process.env; @@ -145,8 +48,10 @@ describe('Class: BatchProcessor', () => { let processedMessages = await processor.process(); // Assess - expect(processedMessages).toStrictEqual([["success", (firstRecord as SQSRecord).body, firstRecord], - ["success", (secondRecord as SQSRecord).body, secondRecord]]); + expect(processedMessages).toStrictEqual([ + ["success", (firstRecord as SQSRecord).body, firstRecord], + ["success", (secondRecord as SQSRecord).body, secondRecord] + ]); }); test('Batch processing SQS records with some failures', async () => { @@ -168,7 +73,7 @@ describe('Class: BatchProcessor', () => { {"batchItemFailures": [{"itemIdentifier": (firstRecord as SQSRecord).messageId}, {"itemIdentifier": (thirdRecord as SQSRecord).messageId}] - }, + } ); }); @@ -200,8 +105,10 @@ describe('Class: BatchProcessor', () => { let processedMessages = await processor.process(); // Assess - expect(processedMessages).toStrictEqual([["success", (firstRecord as SQSRecord).body, firstRecord], - ["success", (secondRecord as SQSRecord).body, secondRecord]]); + expect(processedMessages).toStrictEqual([ + ["success", (firstRecord as SQSRecord).body, firstRecord], + ["success", (secondRecord as SQSRecord).body, secondRecord] + ]); }); test('Batch processing SQS records with failures', async () => { @@ -257,8 +164,10 @@ describe('Class: BatchProcessor', () => { let processedMessages = await processor.process(); // Assess - expect(processedMessages).toStrictEqual([["success", (firstRecord as KinesisStreamRecord).kinesis.data, firstRecord], - ["success", (secondRecord as KinesisStreamRecord).kinesis.data, secondRecord]]); + expect(processedMessages).toStrictEqual([ + ["success", (firstRecord as KinesisStreamRecord).kinesis.data, firstRecord], + ["success", (secondRecord as 
KinesisStreamRecord).kinesis.data, secondRecord] + ]); }); test('Batch processing Kinesis records with failures', async () => { @@ -313,8 +222,10 @@ describe('Class: BatchProcessor', () => { let processedMessages = await processor.process(); // Assess - expect(processedMessages).toStrictEqual([["success", (firstRecord as KinesisStreamRecord).kinesis.data, firstRecord], - ["success", (secondRecord as KinesisStreamRecord).kinesis.data, secondRecord]]); + expect(processedMessages).toStrictEqual([ + ["success", (firstRecord as KinesisStreamRecord).kinesis.data, firstRecord], + ["success", (secondRecord as KinesisStreamRecord).kinesis.data, secondRecord] + ]); }); test('Batch processing Kinesis records with failures', async () => { @@ -370,8 +281,10 @@ describe('Class: BatchProcessor', () => { let processedMessages = await processor.process(); // Assess - expect(processedMessages).toStrictEqual([["success", (firstRecord as DynamoDBRecord).dynamodb?.NewImage?.Message, firstRecord], - ["success", (secondRecord as DynamoDBRecord).dynamodb?.NewImage?.Message, secondRecord]]); + expect(processedMessages).toStrictEqual([ + ["success", (firstRecord as DynamoDBRecord).dynamodb?.NewImage?.Message, firstRecord], + ["success", (secondRecord as DynamoDBRecord).dynamodb?.NewImage?.Message, secondRecord] + ]); }); test('Batch processing DynamoDB records with failures', async () => { @@ -427,8 +340,10 @@ describe('Class: BatchProcessor', () => { let processedMessages = await processor.process(); // Assess - expect(processedMessages).toStrictEqual([["success", (firstRecord as DynamoDBRecord).dynamodb?.NewImage?.Message, firstRecord], - ["success", (secondRecord as DynamoDBRecord).dynamodb?.NewImage?.Message, secondRecord]]); + expect(processedMessages).toStrictEqual([ + ["success", (firstRecord as DynamoDBRecord).dynamodb?.NewImage?.Message, firstRecord], + ["success", (secondRecord as DynamoDBRecord).dynamodb?.NewImage?.Message, secondRecord] + ]); }); test('Batch processing DynamoDB 
records with failures', async () => { From a90e2b60fb446038c325ac86212c7a12bfc563eb Mon Sep 17 00:00:00 2001 From: erikayao93 Date: Fri, 7 Jul 2023 16:46:49 +0000 Subject: [PATCH 07/24] Lint fix, updated docstrings --- package-lock.json | 10 +- .../batch/src/BasePartialBatchProcessor.ts | 40 +- packages/batch/src/BasePartialProcessor.ts | 25 +- packages/batch/src/BatchProcessor.ts | 10 +- packages/batch/src/constants.ts | 16 +- packages/batch/src/errors.ts | 15 +- packages/batch/src/types.ts | 3 + packages/batch/tests/helpers/factories.ts | 115 ++--- packages/batch/tests/helpers/handlers.ts | 80 ++-- .../batch/tests/unit/BatchProcessor.test.ts | 441 +++++++++++------- 10 files changed, 460 insertions(+), 295 deletions(-) diff --git a/package-lock.json b/package-lock.json index 98e3248089..b23aeae058 100644 --- a/package-lock.json +++ b/package-lock.json @@ -639,6 +639,10 @@ "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz", "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==" }, + "node_modules/@aws-lambda-powertools/batch": { + "resolved": "packages/batch", + "link": true + }, "node_modules/@aws-lambda-powertools/commons": { "resolved": "packages/commons", "link": true @@ -6588,10 +6592,6 @@ ], "license": "MIT" }, - "node_modules/batch": { - "resolved": "packages/batch", - "link": true - }, "node_modules/before-after-hook": { "version": "2.2.3", "resolved": "https://registry.npmjs.org/before-after-hook/-/before-after-hook-2.2.3.tgz", @@ -18506,4 +18506,4 @@ } } } -} +} \ No newline at end of file diff --git a/packages/batch/src/BasePartialBatchProcessor.ts b/packages/batch/src/BasePartialBatchProcessor.ts index 175cb1013a..873e23dbe4 100644 --- a/packages/batch/src/BasePartialBatchProcessor.ts +++ b/packages/batch/src/BasePartialBatchProcessor.ts @@ -1,7 +1,11 @@ +/** + * Process batch and partially report failed items + */ import { DynamoDBRecord, KinesisStreamRecord, SQSRecord } from 
'aws-lambda'; import { BasePartialProcessor, BatchProcessingError, + DATA_CLASS_MAPPING, DEFAULT_RESPONSE, EventSourceDataClassTypes, EventType, @@ -10,30 +14,23 @@ import { abstract class BasePartialBatchProcessor extends BasePartialProcessor { public COLLECTOR_MAPPING; - public DATA_CLASS_MAPPING; - public batchResponse: { [key: string]: { [key: string]: string }[] }; public eventType: EventType; /** - * Process batch and partially report failed items + * Initializes base batch processing class * @param eventType Whether this is SQS, DynamoDB stream, or Kinesis data stream event */ public constructor(eventType: EventType) { super(); this.eventType = eventType; - this.batchResponse = DEFAULT_RESPONSE; + this.batchResponse = DEFAULT_RESPONSE; this.COLLECTOR_MAPPING = { [EventType.SQS]: () => this.collectSqsFailures(), [EventType.KinesisDataStreams]: () => this.collectKinesisFailures(), [EventType.DynamoDBStreams]: () => this.collectDynamoDBFailures(), }; - this.DATA_CLASS_MAPPING = { - [EventType.SQS]: (record: EventSourceDataClassTypes) => record as SQSRecord, - [EventType.KinesisDataStreams]: (record: EventSourceDataClassTypes) => record as KinesisStreamRecord, - [EventType.DynamoDBStreams]: (record: EventSourceDataClassTypes) => record as DynamoDBRecord, - } } /** @@ -57,6 +54,10 @@ abstract class BasePartialBatchProcessor extends BasePartialProcessor { this.batchResponse = { batchItemFailures: messages }; } + /** + * Collects identifiers of failed items for a DynamoDB stream + * @returns list of identifiers for failed items + */ public collectDynamoDBFailures(): { [key: string]: string }[] { const failures: { [key: string]: string }[] = []; @@ -70,6 +71,10 @@ abstract class BasePartialBatchProcessor extends BasePartialProcessor { return failures; } + /** + * Collects identifiers of failed items for a Kinesis stream + * @returns list of identifiers for failed items + */ public collectKinesisFailures(): { [key: string]: string }[] { const failures: { [key: 
string]: string }[] = []; @@ -81,6 +86,10 @@ abstract class BasePartialBatchProcessor extends BasePartialProcessor { return failures; } + /** + * Collects identifiers of failed items for an SQS batch + * @returns list of identifiers for failed items + */ public collectSqsFailures(): { [key: string]: string }[] { const failures: { [key: string]: string }[] = []; @@ -92,17 +101,26 @@ abstract class BasePartialBatchProcessor extends BasePartialProcessor { return failures; } + /** + * Determines whether all records in a batch failed to process + * @returns true if all records resulted in exception results + */ public entireBatchFailed(): boolean { return this.exceptions.length == this.records.length; } /** + * Collects identifiers for failed batch items * @returns formatted messages to use in batch deletion */ public getMessagesToReport(): { [key: string]: string }[] { return this.COLLECTOR_MAPPING[this.eventType](); } + /** + * Determines if any records failed to process + * @returns true if any records resulted in exception + */ public hasMessagesToReport(): boolean { if (this.failureMessages.length != 0) { return true; @@ -122,7 +140,7 @@ abstract class BasePartialBatchProcessor extends BasePartialProcessor { this.successMessages.length = 0; this.failureMessages.length = 0; this.exceptions.length = 0; - this.batchResponse = DEFAULT_RESPONSE; + this.batchResponse = DEFAULT_RESPONSE; } /** @@ -136,7 +154,7 @@ abstract class BasePartialBatchProcessor extends BasePartialProcessor { record: EventSourceDataClassTypes, eventType: EventType ): SQSRecord | KinesisStreamRecord | DynamoDBRecord { - return this.DATA_CLASS_MAPPING[eventType](record); + return DATA_CLASS_MAPPING[eventType](record); } } diff --git a/packages/batch/src/BasePartialProcessor.ts b/packages/batch/src/BasePartialProcessor.ts index 2a158fbc3b..7812548c60 100644 --- a/packages/batch/src/BasePartialProcessor.ts +++ b/packages/batch/src/BasePartialProcessor.ts @@ -1,3 +1,6 @@ +/** + * Abstract class for 
batch processors + */ import { BaseRecord, EventSourceDataClassTypes, @@ -6,27 +9,31 @@ import { SuccessResponse, } from '.'; -/** - * Abstract class for batch processors - */ abstract class BasePartialProcessor { public exceptions: Error[]; public failureMessages: EventSourceDataClassTypes[]; - public handler: CallableFunction = new Function(); + public handler: CallableFunction; public records: BaseRecord[]; public successMessages: EventSourceDataClassTypes[]; + /** + * Initializes base processor class + */ public constructor() { this.successMessages = []; this.failureMessages = []; this.exceptions = []; this.records = []; + this.handler = new Function(); } + /** + * Clean class instance after processing + */ public abstract clean(): void; /** @@ -40,13 +47,16 @@ abstract class BasePartialProcessor { exception: Error ): FailureResponse { const entry: FailureResponse = ['fail', exception.message, record]; - console.debug("Record processing exception: " + exception.message); + console.debug('Record processing exception: ' + exception.message); this.exceptions.push(exception); this.failureMessages.push(record); return entry; } + /** + * Prepare class instance before processing + */ public abstract prepare(): void; /** @@ -62,6 +72,7 @@ abstract class BasePartialProcessor { } this.clean(); + return processedRecords; } @@ -74,14 +85,14 @@ abstract class BasePartialProcessor { ): Promise; /** - * Set instance attributes before execution + * Set class instance attributes before execution * @param records List of records to be processed * @param handler CallableFunction to process entries of "records" * @returns this object */ public register( records: BaseRecord[], - handler: CallableFunction, + handler: CallableFunction ): BasePartialProcessor { this.records = records; this.handler = handler; diff --git a/packages/batch/src/BatchProcessor.ts b/packages/batch/src/BatchProcessor.ts index 38bc5adbcb..c3ddf4d25b 100644 --- a/packages/batch/src/BatchProcessor.ts +++ 
b/packages/batch/src/BatchProcessor.ts @@ -1,3 +1,6 @@ +/** + * Process native partial responses from SQS, Kinesis Data Streams, and DynamoDB + */ import { BasePartialBatchProcessor, BaseRecord, @@ -6,12 +9,17 @@ import { } from '.'; class BatchProcessor extends BasePartialBatchProcessor { + /** + * Process a record with instance's handler + * @param record Batch record to be processed + * @returns response of success or failure + */ public async processRecord( record: BaseRecord ): Promise { try { const data = this.toBatchType(record, this.eventType); - let result = await this.handler(data); + const result = await this.handler(data); return this.successHandler(record, result); } catch (e) { diff --git a/packages/batch/src/constants.ts b/packages/batch/src/constants.ts index e525f78ea3..37b4093618 100644 --- a/packages/batch/src/constants.ts +++ b/packages/batch/src/constants.ts @@ -1,3 +1,9 @@ +/** + * Constants for batch processor classes + */ +import { DynamoDBRecord, KinesisStreamRecord, SQSRecord } from 'aws-lambda'; +import { EventSourceDataClassTypes } from '.'; + enum EventType { SQS = 'SQS', KinesisDataStreams = 'KinesisDataStreams', @@ -8,4 +14,12 @@ const DEFAULT_RESPONSE: { [key: string]: { [key: string]: string }[] } = { batchItemFailures: [], }; -export { EventType, DEFAULT_RESPONSE }; +const DATA_CLASS_MAPPING = { + [EventType.SQS]: (record: EventSourceDataClassTypes) => record as SQSRecord, + [EventType.KinesisDataStreams]: (record: EventSourceDataClassTypes) => + record as KinesisStreamRecord, + [EventType.DynamoDBStreams]: (record: EventSourceDataClassTypes) => + record as DynamoDBRecord, +}; + +export { EventType, DEFAULT_RESPONSE, DATA_CLASS_MAPPING }; diff --git a/packages/batch/src/errors.ts b/packages/batch/src/errors.ts index 747d54939a..40c7129530 100644 --- a/packages/batch/src/errors.ts +++ b/packages/batch/src/errors.ts @@ -1,3 +1,7 @@ +/** + * Batch processing exceptions + */ + class BaseBatchProcessingError extends Error { public 
childExceptions: Error[]; @@ -10,21 +14,24 @@ class BaseBatchProcessingError extends Error { } public formatExceptions(parentExceptionString: string): string { - let exceptionList: string[] = [parentExceptionString + "\n"]; + const exceptionList: string[] = [parentExceptionString + '\n']; for (const exception of this.childExceptions) { exceptionList.push(exception.message); } - return "\n" + exceptionList; + return '\n' + exceptionList; } } +/** + * When all batch records failed to be processed + */ class BatchProcessingError extends BaseBatchProcessingError { public constructor(msg: string, childExceptions: Error[]) { super(msg, childExceptions); - let parentExceptionString: string = this.message; - this.message = this.formatExceptions(parentExceptionString) + const parentExceptionString: string = this.message; + this.message = this.formatExceptions(parentExceptionString); } } diff --git a/packages/batch/src/types.ts b/packages/batch/src/types.ts index 775c60cc71..a1d20ff399 100644 --- a/packages/batch/src/types.ts +++ b/packages/batch/src/types.ts @@ -1,3 +1,6 @@ +/** + * Types for batch processing utility + */ import { DynamoDBRecord, KinesisStreamRecord, SQSRecord } from 'aws-lambda'; // types from base.py diff --git a/packages/batch/tests/helpers/factories.ts b/packages/batch/tests/helpers/factories.ts index de72110d9c..ff3d5b5a69 100644 --- a/packages/batch/tests/helpers/factories.ts +++ b/packages/batch/tests/helpers/factories.ts @@ -2,65 +2,68 @@ import { DynamoDBRecord, KinesisStreamRecord, SQSRecord } from 'aws-lambda'; import { v4 } from 'uuid'; export const sqsEventFactory = (body: string): SQSRecord => { - return { - "messageId": v4(), - "receiptHandle": "AQEBwJnKyrHigUMZj6rYigCgxlaS3SLy0a", - "body": body, - "attributes": { - "ApproximateReceiveCount": "1", - "SentTimestamp": "1545082649183", - "SenderId": "AIDAIENQZJOLO23YVJ4VO", - "ApproximateFirstReceiveTimestamp": "1545082649185", - }, - "messageAttributes": {}, - "md5OfBody": 
"e4e68fb7bd0e697a0ae8f1bb342846b3", - "eventSource": "aws:sqs", - "eventSourceARN": "arn:aws:sqs:us-east-2:123456789012:my-queue", - "awsRegion": "us-east-1", - } + return { + messageId: v4(), + receiptHandle: 'AQEBwJnKyrHigUMZj6rYigCgxlaS3SLy0a', + body: body, + attributes: { + ApproximateReceiveCount: '1', + SentTimestamp: '1545082649183', + SenderId: 'AIDAIENQZJOLO23YVJ4VO', + ApproximateFirstReceiveTimestamp: '1545082649185', + }, + messageAttributes: {}, + md5OfBody: 'e4e68fb7bd0e697a0ae8f1bb342846b3', + eventSource: 'aws:sqs', + eventSourceARN: 'arn:aws:sqs:us-east-2:123456789012:my-queue', + awsRegion: 'us-east-1', + }; }; export const kinesisEventFactory = (body: string): KinesisStreamRecord => { - let seq: string = ""; - for (var i = 0; i < 52; i++) { - seq = seq + Math.floor(Math.random() * 10); - } - return { - "kinesis": { - "kinesisSchemaVersion": "1.0", - "partitionKey": "1", - "sequenceNumber": seq, - "data": body, - "approximateArrivalTimestamp": 1545084650.987, - }, - "eventSource": "aws:kinesis", - "eventVersion": "1.0", - "eventID": "shardId-000000000006:" + seq, - "eventName": "aws:kinesis:record", - "invokeIdentityArn": "arn:aws:iam::123456789012:role/lambda-role", - "awsRegion": "us-east-2", - "eventSourceARN": "arn:aws:kinesis:us-east-2:123456789012:stream/lambda-stream", - } + let seq = ''; + for (let i = 0; i < 52; i++) { + seq = seq + Math.floor(Math.random() * 10); + } + + return { + kinesis: { + kinesisSchemaVersion: '1.0', + partitionKey: '1', + sequenceNumber: seq, + data: body, + approximateArrivalTimestamp: 1545084650.987, + }, + eventSource: 'aws:kinesis', + eventVersion: '1.0', + eventID: 'shardId-000000000006:' + seq, + eventName: 'aws:kinesis:record', + invokeIdentityArn: 'arn:aws:iam::123456789012:role/lambda-role', + awsRegion: 'us-east-2', + eventSourceARN: + 'arn:aws:kinesis:us-east-2:123456789012:stream/lambda-stream', + }; }; export const dynamodbEventFactory = (body: string): DynamoDBRecord => { - let seq: string = ""; - 
for (var i = 0; i < 10; i++) { - seq = seq + Math.floor(Math.random() * 10); - } - return { - "eventID": "1", - "eventVersion": "1.0", - "dynamodb": { - "Keys": {"Id": {"N": "101"}}, - "NewImage": {"Message": {"S": body}}, - "StreamViewType": "NEW_AND_OLD_IMAGES", - "SequenceNumber": seq, - "SizeBytes": 26, - }, - "awsRegion": "us-west-2", - "eventName": "INSERT", - "eventSourceARN": "eventsource_arn", - "eventSource": "aws:dynamodb", - } -}; \ No newline at end of file + let seq = ''; + for (let i = 0; i < 10; i++) { + seq = seq + Math.floor(Math.random() * 10); + } + + return { + eventID: '1', + eventVersion: '1.0', + dynamodb: { + Keys: { Id: { N: '101' } }, + NewImage: { Message: { S: body } }, + StreamViewType: 'NEW_AND_OLD_IMAGES', + SequenceNumber: seq, + SizeBytes: 26, + }, + awsRegion: 'us-west-2', + eventName: 'INSERT', + eventSourceARN: 'eventsource_arn', + eventSource: 'aws:dynamodb', + }; +}; diff --git a/packages/batch/tests/helpers/handlers.ts b/packages/batch/tests/helpers/handlers.ts index 3c73d1a85b..3bacd04a1a 100644 --- a/packages/batch/tests/helpers/handlers.ts +++ b/packages/batch/tests/helpers/handlers.ts @@ -1,49 +1,61 @@ import { DynamoDBRecord, KinesisStreamRecord, SQSRecord } from 'aws-lambda'; export const sqsRecordHandler = (record: SQSRecord): string => { - let body = record.body; - if (body.includes("fail")) { - throw Error("Failed to process record."); - } - return body; + const body = record.body; + if (body.includes('fail')) { + throw Error('Failed to process record.'); + } + + return body; }; -export const asyncSqsRecordHandler = async (record: SQSRecord): Promise => { - let body = record.body; - if (body.includes("fail")) { - throw Error("Failed to process record."); - } - return body; +export const asyncSqsRecordHandler = async ( + record: SQSRecord +): Promise => { + const body = record.body; + if (body.includes('fail')) { + throw Error('Failed to process record.'); + } + + return body; }; export const kinesisRecordHandler = 
(record: KinesisStreamRecord): string => { - let body = record.kinesis.data; - if (body.includes("fail")) { - throw Error("Failed to process record."); - } - return body; + const body = record.kinesis.data; + if (body.includes('fail')) { + throw Error('Failed to process record.'); + } + + return body; }; -export const asyncKinesisRecordHandler = async (record: KinesisStreamRecord): Promise => { - let body = record.kinesis.data; - if (body.includes("fail")) { - throw Error("Failed to process record."); - } - return body; +export const asyncKinesisRecordHandler = async ( + record: KinesisStreamRecord +): Promise => { + const body = record.kinesis.data; + if (body.includes('fail')) { + throw Error('Failed to process record.'); + } + + return body; }; export const dynamodbRecordHandler = (record: DynamoDBRecord): object => { - let body = record.dynamodb?.NewImage?.Message || {"S": "fail"}; - if (body["S"]?.includes("fail")) { - throw Error("Failed to process record."); - } - return body; + const body = record.dynamodb?.NewImage?.Message || { S: 'fail' }; + if (body['S']?.includes('fail')) { + throw Error('Failed to process record.'); + } + + return body; }; -export const asyncDynamodbRecordHandler = async (record: DynamoDBRecord): Promise => { - let body = await record.dynamodb?.NewImage?.Message || {"S": "fail"}; - if (body["S"]?.includes("fail")) { - throw Error("Failed to process record."); - } - return body; -}; \ No newline at end of file +export const asyncDynamodbRecordHandler = async ( + record: DynamoDBRecord +): Promise => { + const body = (await record.dynamodb?.NewImage?.Message) || { S: 'fail' }; + if (body['S']?.includes('fail')) { + throw Error('Failed to process record.'); + } + + return body; +}; diff --git a/packages/batch/tests/unit/BatchProcessor.test.ts b/packages/batch/tests/unit/BatchProcessor.test.ts index 06787f701d..4bdb0a95f9 100644 --- a/packages/batch/tests/unit/BatchProcessor.test.ts +++ b/packages/batch/tests/unit/BatchProcessor.test.ts 
@@ -4,22 +4,19 @@ * @group unit/batch/class/batchprocessor */ import { BatchProcessingError, BatchProcessor, EventType } from '../../src'; -import { DynamoDBRecord, - KinesisStreamRecord, - SQSRecord -} from 'aws-lambda'; -import { - sqsEventFactory, - kinesisEventFactory, - dynamodbEventFactory +import { DynamoDBRecord, KinesisStreamRecord, SQSRecord } from 'aws-lambda'; +import { + sqsEventFactory, + kinesisEventFactory, + dynamodbEventFactory, } from '../../tests/helpers/factories'; -import { - sqsRecordHandler, - asyncSqsRecordHandler, - kinesisRecordHandler, - asyncKinesisRecordHandler, - dynamodbRecordHandler, - asyncDynamodbRecordHandler +import { + sqsRecordHandler, + asyncSqsRecordHandler, + kinesisRecordHandler, + asyncKinesisRecordHandler, + dynamodbRecordHandler, + asyncDynamodbRecordHandler, } from '../../tests/helpers/handlers'; describe('Class: BatchProcessor', () => { @@ -36,353 +33,445 @@ describe('Class: BatchProcessor', () => { }); describe('Synchronously processing SQS Records', () => { - test('Batch processing SQS records with no failures', async () => { + test('Batch processing SQS records with no failures', async () => { // Prepare - let firstRecord = sqsEventFactory("success"); - let secondRecord = sqsEventFactory("success"); - let records = [firstRecord, secondRecord]; - let processor = new BatchProcessor(EventType.SQS); + const firstRecord = sqsEventFactory('success'); + const secondRecord = sqsEventFactory('success'); + const records = [firstRecord, secondRecord]; + const processor = new BatchProcessor(EventType.SQS); // Act processor.register(records, sqsRecordHandler); - let processedMessages = await processor.process(); + const processedMessages = await processor.process(); // Assess expect(processedMessages).toStrictEqual([ - ["success", (firstRecord as SQSRecord).body, firstRecord], - ["success", (secondRecord as SQSRecord).body, secondRecord] + ['success', (firstRecord as SQSRecord).body, firstRecord], + ['success', (secondRecord as 
SQSRecord).body, secondRecord], ]); }); - + test('Batch processing SQS records with some failures', async () => { // Prepare - let firstRecord = sqsEventFactory("failure"); - let secondRecord = sqsEventFactory("success"); - let thirdRecord = sqsEventFactory("fail"); - let records = [firstRecord, secondRecord, thirdRecord]; - let processor = new BatchProcessor(EventType.SQS); + const firstRecord = sqsEventFactory('failure'); + const secondRecord = sqsEventFactory('success'); + const thirdRecord = sqsEventFactory('fail'); + const records = [firstRecord, secondRecord, thirdRecord]; + const processor = new BatchProcessor(EventType.SQS); // Act processor.register(records, sqsRecordHandler); - let processedMessages = await processor.process(); + const processedMessages = await processor.process(); - // Asses - expect(processedMessages[1]).toStrictEqual(["success", (secondRecord as SQSRecord).body, secondRecord]); + // Assess + expect(processedMessages[1]).toStrictEqual([ + 'success', + (secondRecord as SQSRecord).body, + secondRecord, + ]); expect(processor.failureMessages.length).toBe(2); - expect(processor.response()).toStrictEqual( - {"batchItemFailures": - [{"itemIdentifier": (firstRecord as SQSRecord).messageId}, - {"itemIdentifier": (thirdRecord as SQSRecord).messageId}] - } - ); + expect(processor.response()).toStrictEqual({ + batchItemFailures: [ + { itemIdentifier: (firstRecord as SQSRecord).messageId }, + { itemIdentifier: (thirdRecord as SQSRecord).messageId }, + ], + }); }); - + test('Batch processing SQS records with all failures', async () => { // Prepare - let firstRecord = sqsEventFactory("failure"); - let secondRecord = sqsEventFactory("failure"); - let thirdRecord = sqsEventFactory("fail"); + const firstRecord = sqsEventFactory('failure'); + const secondRecord = sqsEventFactory('failure'); + const thirdRecord = sqsEventFactory('fail'); - let records = [firstRecord, secondRecord, thirdRecord]; - let processor = new BatchProcessor(EventType.SQS); + const 
records = [firstRecord, secondRecord, thirdRecord]; + const processor = new BatchProcessor(EventType.SQS); // Act & Assess processor.register(records, sqsRecordHandler); - await expect(processor.process()).rejects.toThrowError(BatchProcessingError); + await expect(processor.process()).rejects.toThrowError( + BatchProcessingError + ); }); }); describe('Asynchronously processing SQS Records', () => { test('Batch processing SQS records with no failures', async () => { // Prepare - let firstRecord = sqsEventFactory("success"); - let secondRecord = sqsEventFactory("success"); - let records = [firstRecord, secondRecord]; - let processor = new BatchProcessor(EventType.SQS); + const firstRecord = sqsEventFactory('success'); + const secondRecord = sqsEventFactory('success'); + const records = [firstRecord, secondRecord]; + const processor = new BatchProcessor(EventType.SQS); // Act processor.register(records, asyncSqsRecordHandler); - let processedMessages = await processor.process(); + const processedMessages = await processor.process(); // Assess expect(processedMessages).toStrictEqual([ - ["success", (firstRecord as SQSRecord).body, firstRecord], - ["success", (secondRecord as SQSRecord).body, secondRecord] + ['success', (firstRecord as SQSRecord).body, firstRecord], + ['success', (secondRecord as SQSRecord).body, secondRecord], ]); }); test('Batch processing SQS records with failures', async () => { // Prepare - let firstRecord = sqsEventFactory("failure"); - let secondRecord = sqsEventFactory("success"); - let thirdRecord = sqsEventFactory("fail"); - let records = [firstRecord, secondRecord, thirdRecord]; - let processor = new BatchProcessor(EventType.SQS); + const firstRecord = sqsEventFactory('failure'); + const secondRecord = sqsEventFactory('success'); + const thirdRecord = sqsEventFactory('fail'); + const records = [firstRecord, secondRecord, thirdRecord]; + const processor = new BatchProcessor(EventType.SQS); // Act processor.register(records, 
asyncSqsRecordHandler); - let processedMessages = await processor.process(); + const processedMessages = await processor.process(); // Assess - expect(processedMessages[1]).toStrictEqual(["success", (secondRecord as SQSRecord).body, secondRecord]); + expect(processedMessages[1]).toStrictEqual([ + 'success', + (secondRecord as SQSRecord).body, + secondRecord, + ]); expect(processor.failureMessages.length).toBe(2); - expect(processor.response()).toStrictEqual( - {"batchItemFailures": - [{"itemIdentifier": (firstRecord as SQSRecord).messageId}, - {"itemIdentifier": (thirdRecord as SQSRecord).messageId}] - } - ); + expect(processor.response()).toStrictEqual({ + batchItemFailures: [ + { itemIdentifier: (firstRecord as SQSRecord).messageId }, + { itemIdentifier: (thirdRecord as SQSRecord).messageId }, + ], + }); }); test('Batch processing SQS records with all failures', async () => { // Prepare - let firstRecord = sqsEventFactory("failure"); - let secondRecord = sqsEventFactory("failure"); - let thirdRecord = sqsEventFactory("fail"); + const firstRecord = sqsEventFactory('failure'); + const secondRecord = sqsEventFactory('failure'); + const thirdRecord = sqsEventFactory('fail'); - let records = [firstRecord, secondRecord, thirdRecord]; - let processor = new BatchProcessor(EventType.SQS); + const records = [firstRecord, secondRecord, thirdRecord]; + const processor = new BatchProcessor(EventType.SQS); // Act processor.register(records, asyncSqsRecordHandler); // Assess - await expect(processor.process()).rejects.toThrowError(BatchProcessingError); + await expect(processor.process()).rejects.toThrowError( + BatchProcessingError + ); }); }); describe('Synchronously processing Kinesis Records', () => { test('Batch processing Kinesis records with no failures', async () => { // Prepare - let firstRecord = kinesisEventFactory("success"); - let secondRecord = kinesisEventFactory("success"); - let records = [firstRecord, secondRecord]; - let processor = new 
BatchProcessor(EventType.KinesisDataStreams); + const firstRecord = kinesisEventFactory('success'); + const secondRecord = kinesisEventFactory('success'); + const records = [firstRecord, secondRecord]; + const processor = new BatchProcessor(EventType.KinesisDataStreams); // Act processor.register(records, kinesisRecordHandler); - let processedMessages = await processor.process(); + const processedMessages = await processor.process(); // Assess expect(processedMessages).toStrictEqual([ - ["success", (firstRecord as KinesisStreamRecord).kinesis.data, firstRecord], - ["success", (secondRecord as KinesisStreamRecord).kinesis.data, secondRecord] + [ + 'success', + (firstRecord as KinesisStreamRecord).kinesis.data, + firstRecord, + ], + [ + 'success', + (secondRecord as KinesisStreamRecord).kinesis.data, + secondRecord, + ], ]); }); test('Batch processing Kinesis records with failures', async () => { // Prepare - let firstRecord = kinesisEventFactory("failure"); - let secondRecord = kinesisEventFactory("success"); - let thirdRecord = kinesisEventFactory("fail"); - let records = [firstRecord, secondRecord, thirdRecord]; - let processor = new BatchProcessor(EventType.KinesisDataStreams); + const firstRecord = kinesisEventFactory('failure'); + const secondRecord = kinesisEventFactory('success'); + const thirdRecord = kinesisEventFactory('fail'); + const records = [firstRecord, secondRecord, thirdRecord]; + const processor = new BatchProcessor(EventType.KinesisDataStreams); // Act processor.register(records, kinesisRecordHandler); - let processedMessages = await processor.process(); + const processedMessages = await processor.process(); // Assess - expect(processedMessages[1]).toStrictEqual(["success", (secondRecord as KinesisStreamRecord).kinesis.data, secondRecord]); + expect(processedMessages[1]).toStrictEqual([ + 'success', + (secondRecord as KinesisStreamRecord).kinesis.data, + secondRecord, + ]); expect(processor.failureMessages.length).toBe(2); - 
expect(processor.response()).toStrictEqual( - {"batchItemFailures": - [{"itemIdentifier": (firstRecord as KinesisStreamRecord).kinesis.sequenceNumber}, - {"itemIdentifier": (thirdRecord as KinesisStreamRecord).kinesis.sequenceNumber}] - } - ); + expect(processor.response()).toStrictEqual({ + batchItemFailures: [ + { + itemIdentifier: (firstRecord as KinesisStreamRecord).kinesis + .sequenceNumber, + }, + { + itemIdentifier: (thirdRecord as KinesisStreamRecord).kinesis + .sequenceNumber, + }, + ], + }); }); test('Batch processing Kinesis records with all failures', async () => { - let firstRecord = kinesisEventFactory("failure"); - let secondRecord = kinesisEventFactory("failure"); - let thirdRecord = kinesisEventFactory("fail"); + const firstRecord = kinesisEventFactory('failure'); + const secondRecord = kinesisEventFactory('failure'); + const thirdRecord = kinesisEventFactory('fail'); - let records = [firstRecord, secondRecord, thirdRecord]; - let processor = new BatchProcessor(EventType.KinesisDataStreams); + const records = [firstRecord, secondRecord, thirdRecord]; + const processor = new BatchProcessor(EventType.KinesisDataStreams); // Act processor.register(records, kinesisRecordHandler); // Assess - await expect(processor.process()).rejects.toThrowError(BatchProcessingError); + await expect(processor.process()).rejects.toThrowError( + BatchProcessingError + ); }); }); describe('Asynchronously processing Kinesis Records', () => { test('Batch processing Kinesis records with no failures', async () => { // Prepare - let firstRecord = kinesisEventFactory("success"); - let secondRecord = kinesisEventFactory("success"); - let records = [firstRecord, secondRecord]; - let processor = new BatchProcessor(EventType.KinesisDataStreams); + const firstRecord = kinesisEventFactory('success'); + const secondRecord = kinesisEventFactory('success'); + const records = [firstRecord, secondRecord]; + const processor = new BatchProcessor(EventType.KinesisDataStreams); // Act 
processor.register(records, asyncKinesisRecordHandler); - let processedMessages = await processor.process(); + const processedMessages = await processor.process(); // Assess expect(processedMessages).toStrictEqual([ - ["success", (firstRecord as KinesisStreamRecord).kinesis.data, firstRecord], - ["success", (secondRecord as KinesisStreamRecord).kinesis.data, secondRecord] + [ + 'success', + (firstRecord as KinesisStreamRecord).kinesis.data, + firstRecord, + ], + [ + 'success', + (secondRecord as KinesisStreamRecord).kinesis.data, + secondRecord, + ], ]); }); test('Batch processing Kinesis records with failures', async () => { // Prepare - let firstRecord = kinesisEventFactory("failure"); - let secondRecord = kinesisEventFactory("success"); - let thirdRecord = kinesisEventFactory("fail"); - let records = [firstRecord, secondRecord, thirdRecord]; - let processor = new BatchProcessor(EventType.KinesisDataStreams); + const firstRecord = kinesisEventFactory('failure'); + const secondRecord = kinesisEventFactory('success'); + const thirdRecord = kinesisEventFactory('fail'); + const records = [firstRecord, secondRecord, thirdRecord]; + const processor = new BatchProcessor(EventType.KinesisDataStreams); // Act processor.register(records, asyncKinesisRecordHandler); - let processedMessages = await processor.process(); + const processedMessages = await processor.process(); // Assess - expect(processedMessages[1]).toStrictEqual(["success", (secondRecord as KinesisStreamRecord).kinesis.data, secondRecord]); + expect(processedMessages[1]).toStrictEqual([ + 'success', + (secondRecord as KinesisStreamRecord).kinesis.data, + secondRecord, + ]); expect(processor.failureMessages.length).toBe(2); - expect(processor.response()).toStrictEqual( - {"batchItemFailures": - [{"itemIdentifier": (firstRecord as KinesisStreamRecord).kinesis.sequenceNumber}, - {"itemIdentifier": (thirdRecord as KinesisStreamRecord).kinesis.sequenceNumber}] - } - ); + expect(processor.response()).toStrictEqual({ 
+ batchItemFailures: [ + { + itemIdentifier: (firstRecord as KinesisStreamRecord).kinesis + .sequenceNumber, + }, + { + itemIdentifier: (thirdRecord as KinesisStreamRecord).kinesis + .sequenceNumber, + }, + ], + }); }); test('Batch processing Kinesis records with all failures', async () => { // Prepare - let firstRecord = kinesisEventFactory("failure"); - let secondRecord = kinesisEventFactory("failure"); - let thirdRecord = kinesisEventFactory("fail"); + const firstRecord = kinesisEventFactory('failure'); + const secondRecord = kinesisEventFactory('failure'); + const thirdRecord = kinesisEventFactory('fail'); - let records = [firstRecord, secondRecord, thirdRecord]; - let processor = new BatchProcessor(EventType.KinesisDataStreams); + const records = [firstRecord, secondRecord, thirdRecord]; + const processor = new BatchProcessor(EventType.KinesisDataStreams); // Act processor.register(records, asyncKinesisRecordHandler); // Assess - await expect(processor.process()).rejects.toThrowError(BatchProcessingError); + await expect(processor.process()).rejects.toThrowError( + BatchProcessingError + ); }); - }); + }); describe('Synchronously processing DynamoDB Records', () => { test('Batch processing DynamoDB records with no failures', async () => { // Prepare - let firstRecord = dynamodbEventFactory("success"); - let secondRecord = dynamodbEventFactory("success"); - let records = [firstRecord, secondRecord]; - let processor = new BatchProcessor(EventType.DynamoDBStreams); + const firstRecord = dynamodbEventFactory('success'); + const secondRecord = dynamodbEventFactory('success'); + const records = [firstRecord, secondRecord]; + const processor = new BatchProcessor(EventType.DynamoDBStreams); // Act processor.register(records, dynamodbRecordHandler); - let processedMessages = await processor.process(); + const processedMessages = await processor.process(); // Assess expect(processedMessages).toStrictEqual([ - ["success", (firstRecord as 
DynamoDBRecord).dynamodb?.NewImage?.Message, firstRecord], - ["success", (secondRecord as DynamoDBRecord).dynamodb?.NewImage?.Message, secondRecord] + [ + 'success', + (firstRecord as DynamoDBRecord).dynamodb?.NewImage?.Message, + firstRecord, + ], + [ + 'success', + (secondRecord as DynamoDBRecord).dynamodb?.NewImage?.Message, + secondRecord, + ], ]); }); test('Batch processing DynamoDB records with failures', async () => { // Prepare - let firstRecord = dynamodbEventFactory("failure"); - let secondRecord = dynamodbEventFactory("success"); - let thirdRecord = dynamodbEventFactory("fail"); - let records = [firstRecord, secondRecord, thirdRecord]; - let processor = new BatchProcessor(EventType.DynamoDBStreams); + const firstRecord = dynamodbEventFactory('failure'); + const secondRecord = dynamodbEventFactory('success'); + const thirdRecord = dynamodbEventFactory('fail'); + const records = [firstRecord, secondRecord, thirdRecord]; + const processor = new BatchProcessor(EventType.DynamoDBStreams); // Act processor.register(records, dynamodbRecordHandler); - let processedMessages = await processor.process(); + const processedMessages = await processor.process(); // Assess - expect(processedMessages[1]).toStrictEqual(["success", (secondRecord as DynamoDBRecord).dynamodb?.NewImage?.Message, secondRecord]); + expect(processedMessages[1]).toStrictEqual([ + 'success', + (secondRecord as DynamoDBRecord).dynamodb?.NewImage?.Message, + secondRecord, + ]); expect(processor.failureMessages.length).toBe(2); - expect(processor.response()).toStrictEqual( - {"batchItemFailures": - [{"itemIdentifier": (firstRecord as DynamoDBRecord).dynamodb?.SequenceNumber}, - {"itemIdentifier": (thirdRecord as DynamoDBRecord).dynamodb?.SequenceNumber}] - } - ); + expect(processor.response()).toStrictEqual({ + batchItemFailures: [ + { + itemIdentifier: (firstRecord as DynamoDBRecord).dynamodb + ?.SequenceNumber, + }, + { + itemIdentifier: (thirdRecord as DynamoDBRecord).dynamodb + ?.SequenceNumber, 
+ }, + ], + }); }); test('Batch processing DynamoDB records with all failures', async () => { // Prepare - let firstRecord = dynamodbEventFactory("failure"); - let secondRecord = dynamodbEventFactory("failure"); - let thirdRecord = dynamodbEventFactory("fail"); + const firstRecord = dynamodbEventFactory('failure'); + const secondRecord = dynamodbEventFactory('failure'); + const thirdRecord = dynamodbEventFactory('fail'); - let records = [firstRecord, secondRecord, thirdRecord]; - let processor = new BatchProcessor(EventType.DynamoDBStreams); + const records = [firstRecord, secondRecord, thirdRecord]; + const processor = new BatchProcessor(EventType.DynamoDBStreams); // Act processor.register(records, dynamodbRecordHandler); // Assess - await expect(processor.process()).rejects.toThrowError(BatchProcessingError); + await expect(processor.process()).rejects.toThrowError( + BatchProcessingError + ); }); }); describe('Asynchronously processing DynamoDB Records', () => { test('Batch processing DynamoDB records with no failures', async () => { // Prepare - let firstRecord = dynamodbEventFactory("success"); - let secondRecord = dynamodbEventFactory("success"); - let records = [firstRecord, secondRecord]; - let processor = new BatchProcessor(EventType.DynamoDBStreams); + const firstRecord = dynamodbEventFactory('success'); + const secondRecord = dynamodbEventFactory('success'); + const records = [firstRecord, secondRecord]; + const processor = new BatchProcessor(EventType.DynamoDBStreams); // Act processor.register(records, asyncDynamodbRecordHandler); - let processedMessages = await processor.process(); + const processedMessages = await processor.process(); // Assess expect(processedMessages).toStrictEqual([ - ["success", (firstRecord as DynamoDBRecord).dynamodb?.NewImage?.Message, firstRecord], - ["success", (secondRecord as DynamoDBRecord).dynamodb?.NewImage?.Message, secondRecord] + [ + 'success', + (firstRecord as DynamoDBRecord).dynamodb?.NewImage?.Message, + 
firstRecord, + ], + [ + 'success', + (secondRecord as DynamoDBRecord).dynamodb?.NewImage?.Message, + secondRecord, + ], ]); }); test('Batch processing DynamoDB records with failures', async () => { // Prepare - let firstRecord = dynamodbEventFactory("failure"); - let secondRecord = dynamodbEventFactory("success"); - let thirdRecord = dynamodbEventFactory("fail"); - let records = [firstRecord, secondRecord, thirdRecord]; - let processor = new BatchProcessor(EventType.DynamoDBStreams); + const firstRecord = dynamodbEventFactory('failure'); + const secondRecord = dynamodbEventFactory('success'); + const thirdRecord = dynamodbEventFactory('fail'); + const records = [firstRecord, secondRecord, thirdRecord]; + const processor = new BatchProcessor(EventType.DynamoDBStreams); // Act processor.register(records, asyncDynamodbRecordHandler); - let processedMessages = await processor.process(); + const processedMessages = await processor.process(); // Assess - expect(processedMessages[1]).toStrictEqual(["success", (secondRecord as DynamoDBRecord).dynamodb?.NewImage?.Message, secondRecord]); + expect(processedMessages[1]).toStrictEqual([ + 'success', + (secondRecord as DynamoDBRecord).dynamodb?.NewImage?.Message, + secondRecord, + ]); expect(processor.failureMessages.length).toBe(2); - expect(processor.response()).toStrictEqual( - {"batchItemFailures": - [{"itemIdentifier": (firstRecord as DynamoDBRecord).dynamodb?.SequenceNumber}, - {"itemIdentifier": (thirdRecord as DynamoDBRecord).dynamodb?.SequenceNumber}] - } - ); + expect(processor.response()).toStrictEqual({ + batchItemFailures: [ + { + itemIdentifier: (firstRecord as DynamoDBRecord).dynamodb + ?.SequenceNumber, + }, + { + itemIdentifier: (thirdRecord as DynamoDBRecord).dynamodb + ?.SequenceNumber, + }, + ], + }); }); test('Batch processing DynamoDB records with all failures', async () => { // Prepare - let firstRecord = dynamodbEventFactory("failure"); - let secondRecord = dynamodbEventFactory("failure"); - let 
thirdRecord = dynamodbEventFactory("fail"); + const firstRecord = dynamodbEventFactory('failure'); + const secondRecord = dynamodbEventFactory('failure'); + const thirdRecord = dynamodbEventFactory('fail'); - let records = [firstRecord, secondRecord, thirdRecord]; - let processor = new BatchProcessor(EventType.DynamoDBStreams); + const records = [firstRecord, secondRecord, thirdRecord]; + const processor = new BatchProcessor(EventType.DynamoDBStreams); // Act processor.register(records, asyncDynamodbRecordHandler); // Assess - await expect(processor.process()).rejects.toThrowError(BatchProcessingError); + await expect(processor.process()).rejects.toThrowError( + BatchProcessingError + ); }); }); }); From d417d7136e467a188eae674f81b4d1d8d8e983f6 Mon Sep 17 00:00:00 2001 From: erikayao93 Date: Fri, 7 Jul 2023 17:29:20 +0000 Subject: [PATCH 08/24] Added response and identifier typings --- .../batch/src/BasePartialBatchProcessor.ts | 26 +++++++++---------- packages/batch/src/BasePartialProcessor.ts | 2 +- packages/batch/src/constants.ts | 4 +-- packages/batch/src/types.ts | 5 ++++ 4 files changed, 21 insertions(+), 16 deletions(-) diff --git a/packages/batch/src/BasePartialBatchProcessor.ts b/packages/batch/src/BasePartialBatchProcessor.ts index 873e23dbe4..ecf31f7f20 100644 --- a/packages/batch/src/BasePartialBatchProcessor.ts +++ b/packages/batch/src/BasePartialBatchProcessor.ts @@ -9,12 +9,14 @@ import { DEFAULT_RESPONSE, EventSourceDataClassTypes, EventType, + ItemIdentifier, + BatchResponse, } from '.'; abstract class BasePartialBatchProcessor extends BasePartialProcessor { public COLLECTOR_MAPPING; - public batchResponse: { [key: string]: { [key: string]: string }[] }; + public batchResponse: BatchResponse; public eventType: EventType; @@ -50,7 +52,7 @@ abstract class BasePartialBatchProcessor extends BasePartialProcessor { ); } - const messages: { [key: string]: string }[] = this.getMessagesToReport(); + const messages: ItemIdentifier[] = 
this.getMessagesToReport(); this.batchResponse = { batchItemFailures: messages }; } @@ -58,8 +60,8 @@ abstract class BasePartialBatchProcessor extends BasePartialProcessor { * Collects identifiers of failed items for a DynamoDB stream * @returns list of identifiers for failed items */ - public collectDynamoDBFailures(): { [key: string]: string }[] { - const failures: { [key: string]: string }[] = []; + public collectDynamoDBFailures(): ItemIdentifier[] { + const failures: ItemIdentifier[] = []; for (const msg of this.failureMessages) { const msgId = (msg as DynamoDBRecord).dynamodb?.SequenceNumber; @@ -75,8 +77,8 @@ abstract class BasePartialBatchProcessor extends BasePartialProcessor { * Collects identifiers of failed items for a Kinesis stream * @returns list of identifiers for failed items */ - public collectKinesisFailures(): { [key: string]: string }[] { - const failures: { [key: string]: string }[] = []; + public collectKinesisFailures(): ItemIdentifier[] { + const failures: ItemIdentifier[] = []; for (const msg of this.failureMessages) { const msgId = (msg as KinesisStreamRecord).kinesis.sequenceNumber; @@ -90,8 +92,8 @@ abstract class BasePartialBatchProcessor extends BasePartialProcessor { * Collects identifiers of failed items for an SQS batch * @returns list of identifiers for failed items */ - public collectSqsFailures(): { [key: string]: string }[] { - const failures: { [key: string]: string }[] = []; + public collectSqsFailures(): ItemIdentifier[] { + const failures: ItemIdentifier[] = []; for (const msg of this.failureMessages) { const msgId = (msg as SQSRecord).messageId; @@ -113,7 +115,7 @@ abstract class BasePartialBatchProcessor extends BasePartialProcessor { * Collects identifiers for failed batch items * @returns formatted messages to use in batch deletion */ - public getMessagesToReport(): { [key: string]: string }[] { + public getMessagesToReport(): ItemIdentifier[] { return this.COLLECTOR_MAPPING[this.eventType](); } @@ -126,9 +128,7 @@ 
abstract class BasePartialBatchProcessor extends BasePartialProcessor { return true; } - console.debug( - 'All ' + this.successMessages.length + ' records successfully processed' - ); + // console.debug('All ' + this.successMessages.length + ' records successfully processed'); return false; } @@ -146,7 +146,7 @@ abstract class BasePartialBatchProcessor extends BasePartialProcessor { /** * @returns Batch items that failed processing, if any */ - public response(): { [key: string]: { [key: string]: string }[] } { + public response(): BatchResponse { return this.batchResponse; } diff --git a/packages/batch/src/BasePartialProcessor.ts b/packages/batch/src/BasePartialProcessor.ts index 7812548c60..00bf1bcdd2 100644 --- a/packages/batch/src/BasePartialProcessor.ts +++ b/packages/batch/src/BasePartialProcessor.ts @@ -47,7 +47,7 @@ abstract class BasePartialProcessor { exception: Error ): FailureResponse { const entry: FailureResponse = ['fail', exception.message, record]; - console.debug('Record processing exception: ' + exception.message); + // console.debug('Record processing exception: ' + exception.message); this.exceptions.push(exception); this.failureMessages.push(record); diff --git a/packages/batch/src/constants.ts b/packages/batch/src/constants.ts index 37b4093618..b8335520a3 100644 --- a/packages/batch/src/constants.ts +++ b/packages/batch/src/constants.ts @@ -2,7 +2,7 @@ * Constants for batch processor classes */ import { DynamoDBRecord, KinesisStreamRecord, SQSRecord } from 'aws-lambda'; -import { EventSourceDataClassTypes } from '.'; +import { BatchResponse, EventSourceDataClassTypes } from '.'; enum EventType { SQS = 'SQS', @@ -10,7 +10,7 @@ enum EventType { DynamoDBStreams = 'DynamoDBStreams', } -const DEFAULT_RESPONSE: { [key: string]: { [key: string]: string }[] } = { +const DEFAULT_RESPONSE: BatchResponse = { batchItemFailures: [], }; diff --git a/packages/batch/src/types.ts b/packages/batch/src/types.ts index a1d20ff399..a89129199d 100644 --- 
a/packages/batch/src/types.ts +++ b/packages/batch/src/types.ts @@ -17,10 +17,15 @@ type SuccessResponse = [string, ResultType, EventSourceDataClassTypes]; type FailureResponse = [string, string, EventSourceDataClassTypes]; +type ItemIdentifier = { [key: string]: string }; +type BatchResponse = { [key: string]: ItemIdentifier[] }; + export type { BaseRecord, EventSourceDataClassTypes, ResultType, SuccessResponse, FailureResponse, + ItemIdentifier, + BatchResponse, }; From 863edc8176fb19aa64359fe4c0c60faf48da62e3 Mon Sep 17 00:00:00 2001 From: Andrea Amorosi Date: Mon, 10 Jul 2023 10:02:30 +0200 Subject: [PATCH 09/24] test(idempotency): improve integration tests for utility (#1591) * docs: new name * chore: rename e2e files * tests(idempotency): expand integration tests * chore(idempotency): remove unreachable code --- .github/workflows/run-e2e-tests.yml | 2 +- packages/idempotency/README.md | 95 ++++- packages/idempotency/src/makeIdempotent.ts | 2 +- .../src/middleware/makeHandlerIdempotent.ts | 137 ++++-- .../BasePersistenceLayerInterface.ts | 3 +- ...akeFunctionIdempotent.test.FunctionCode.ts | 74 ---- .../tests/e2e/makeFunctionIdempotent.test.ts | 203 --------- ...makeHandlerIdempotent.test.FunctionCode.ts | 119 ++++++ .../tests/e2e/makeHandlerIdempotent.test.ts | 390 ++++++++++++++++++ .../e2e/makeIdempotent.test.FunctionCode.ts | 105 +++++ .../tests/e2e/makeIdempotent.test.ts | 336 +++++++++++++++ .../tests/helpers/idempotencyUtils.ts | 7 +- .../tests/unit/makeIdempotent.test.ts | 2 +- 13 files changed, 1145 insertions(+), 330 deletions(-) delete mode 100644 packages/idempotency/tests/e2e/makeFunctionIdempotent.test.FunctionCode.ts delete mode 100644 packages/idempotency/tests/e2e/makeFunctionIdempotent.test.ts create mode 100644 packages/idempotency/tests/e2e/makeHandlerIdempotent.test.FunctionCode.ts create mode 100644 packages/idempotency/tests/e2e/makeHandlerIdempotent.test.ts create mode 100644 
packages/idempotency/tests/e2e/makeIdempotent.test.FunctionCode.ts create mode 100644 packages/idempotency/tests/e2e/makeIdempotent.test.ts diff --git a/.github/workflows/run-e2e-tests.yml b/.github/workflows/run-e2e-tests.yml index 1950c89b84..fb4186c420 100644 --- a/.github/workflows/run-e2e-tests.yml +++ b/.github/workflows/run-e2e-tests.yml @@ -19,7 +19,7 @@ jobs: contents: read strategy: matrix: - package: [logger, metrics, tracer, parameters] + package: [logger, metrics, tracer, parameters, idempotency] version: [14, 16, 18] fail-fast: false steps: diff --git a/packages/idempotency/README.md b/packages/idempotency/README.md index 5fb4acd204..15efa33f7d 100644 --- a/packages/idempotency/README.md +++ b/packages/idempotency/README.md @@ -53,12 +53,37 @@ Next, review the IAM permissions attached to your AWS Lambda function and make s ### Function wrapper -You can make any function idempotent, and safe to retry, by wrapping it using the `makeFunctionIdempotent` higher-order function. +You can make any function idempotent, and safe to retry, by wrapping it using the `makeIdempotent` higher-order function. The function wrapper takes a reference to the function to be made idempotent as first argument, and an object with options as second argument. +When you wrap your Lambda handler function, the utility uses the content of the `event` parameter to handle the idempotency logic. 
+ ```ts -import { makeFunctionIdempotent } from '@aws-lambda-powertools/idempotency'; +import { makeIdempotent } from '@aws-lambda-powertools/idempotency'; +import { DynamoDBPersistenceLayer } from '@aws-lambda-powertools/idempotency/dynamodb'; +import type { Context, APIGatewayProxyEvent } from 'aws-lambda'; + +const persistenceStore = new DynamoDBPersistenceLayer({ + tableName: 'idempotencyTableName', +}); + +const myHandler = async ( + event: APIGatewayProxyEvent, + _context: Context +): Promise => { + // your code goes here here +}; + +export const handler = makeIdempotent(myHandler, { + persistenceStore, +}); +``` + +You can also use the `makeIdempotent` function to wrap any other arbitrary function, not just Lambda handlers. + +```ts +import { makeIdempotent } from '@aws-lambda-powertools/idempotency'; import { DynamoDBPersistenceLayer } from '@aws-lambda-powertools/idempotency/dynamodb'; import type { Context, SQSEvent, SQSRecord } from 'aws-lambda'; @@ -70,20 +95,76 @@ const processingFunction = async (payload: SQSRecord): Promise => { // your code goes here here }; +const processIdempotently = makeIdempotent(processingFunction, { + persistenceStore, +}); + export const handler = async ( event: SQSEvent, _context: Context ): Promise => { for (const record of event.Records) { - await makeFunctionIdempotent(processingFunction, { - dataKeywordArgument: 'transactionId', - persistenceStore, - }); + await processIdempotently(record); } }; ``` -Note that we are specifying a `dataKeywordArgument` option, this tells the Idempotency utility which field(s) will be used as idempotency key. +If your function has multiple arguments, you can use the `dataIndexArgument` option to specify which argument should be used as the idempotency key. 
+ +```ts +import { makeIdempotent } from '@aws-lambda-powertools/idempotency'; +import { DynamoDBPersistenceLayer } from '@aws-lambda-powertools/idempotency/dynamodb'; +import type { Context, SQSEvent, SQSRecord } from 'aws-lambda'; + +const persistenceStore = new DynamoDBPersistenceLayer({ + tableName: 'idempotencyTableName', +}); + +const processingFunction = async (payload: SQSRecord, customerId: string): Promise => { + // your code goes here here +}; + +const processIdempotently = makeIdempotent(processingFunction, { + persistenceStore, + // this tells the utility to use the second argument (`customerId`) as the idempotency key + dataIndexArgument: 1, +}); + +export const handler = async ( + event: SQSEvent, + _context: Context +): Promise => { + for (const record of event.Records) { + await processIdempotently(record, 'customer-123'); + } +}; +``` + +Note that you can also specify a JMESPath expression in the Idempotency config object to select a subset of the event payload as the idempotency key. This is useful when dealing with payloads that contain timestamps or request ids. + +```ts +import { makeIdempotent, IdempotencyConfig } from '@aws-lambda-powertools/idempotency'; +import { DynamoDBPersistenceLayer } from '@aws-lambda-powertools/idempotency/dynamodb'; +import type { Context, APIGatewayProxyEvent } from 'aws-lambda'; + +const persistenceStore = new DynamoDBPersistenceLayer({ + tableName: 'idempotencyTableName', +}); + +const myHandler = async ( + event: APIGatewayProxyEvent, + _context: Context +): Promise => { + // your code goes here here +}; + +export const handler = makeIdempotent(myHandler, { + persistenceStore, + config: new IdempotencyConfig({ + eventKeyJmespath: 'requestContext.identity.user', + }), +}); +``` Check the [docs](https://docs.powertools.aws.dev/lambda/typescript/latest/utilities/idempotency/) for more examples. 
diff --git a/packages/idempotency/src/makeIdempotent.ts b/packages/idempotency/src/makeIdempotent.ts index a44079d493..8bae3cdf6d 100644 --- a/packages/idempotency/src/makeIdempotent.ts +++ b/packages/idempotency/src/makeIdempotent.ts @@ -53,7 +53,7 @@ const isOptionsWithDataIndexArgument = ( * }; * * // we use wrapper to make processing function idempotent with DynamoDBPersistenceLayer - * const processIdempotently = makeFunctionIdempotent(processRecord, { + * const processIdempotently = makeIdempotent(processRecord, { * persistenceStore: new DynamoDBPersistenceLayer() * dataKeywordArgument: 'transactionId', // keyword argument to hash the payload and the result * }); diff --git a/packages/idempotency/src/middleware/makeHandlerIdempotent.ts b/packages/idempotency/src/middleware/makeHandlerIdempotent.ts index 153e19153b..c9e750e53c 100644 --- a/packages/idempotency/src/middleware/makeHandlerIdempotent.ts +++ b/packages/idempotency/src/middleware/makeHandlerIdempotent.ts @@ -1,6 +1,9 @@ import { IdempotencyHandler } from '../IdempotencyHandler'; import { IdempotencyConfig } from '../IdempotencyConfig'; -import { cleanupMiddlewares } from '@aws-lambda-powertools/commons/lib/middleware'; +import { + cleanupMiddlewares, + IDEMPOTENCY_KEY, +} from '@aws-lambda-powertools/commons/lib/middleware'; import { IdempotencyInconsistentStateError, IdempotencyItemAlreadyExistsError, @@ -9,33 +12,87 @@ import { import { IdempotencyRecord } from '../persistence'; import { MAX_RETRIES } from '../constants'; import type { IdempotencyLambdaHandlerOptions } from '../types'; +import type { BasePersistenceLayerInterface } from '../persistence'; import { MiddlewareLikeObj, MiddyLikeRequest, JSONValue, } from '@aws-lambda-powertools/commons'; +/** + * @internal + * Utility function to get the persistence store from the request internal storage + * + * @param request The Middy request object + * @returns The persistence store from the request internal + */ +const 
getPersistenceStoreFromRequestInternal = ( + request: MiddyLikeRequest +): BasePersistenceLayerInterface => { + const persistenceStore = request.internal[ + `${IDEMPOTENCY_KEY}.idempotencyPersistenceStore` + ] as BasePersistenceLayerInterface; + + return persistenceStore; +}; + +/** + * @internal + * Utility function to set the persistence store in the request internal storage + * + * @param request The Middy request object + * @param persistenceStore The persistence store to set in the request internal + */ +const setPersistenceStoreInRequestInternal = ( + request: MiddyLikeRequest, + persistenceStore: BasePersistenceLayerInterface +): void => { + request.internal[`${IDEMPOTENCY_KEY}.idempotencyPersistenceStore`] = + persistenceStore; +}; + +/** + * @internal + * Utility function to set a flag in the request internal storage to skip the idempotency middleware + * This is used to skip the idempotency middleware when the idempotency key is not present in the payload + * or when idempotency is disabled + * + * @param request The Middy request object + */ +const setIdempotencySkipFlag = (request: MiddyLikeRequest): void => { + request.internal[`${IDEMPOTENCY_KEY}.skip`] = true; +}; + +/** + * @internal + * Utility function to get the idempotency key from the request internal storage + * and determine if the request should skip the idempotency middleware + * + * @param request The Middy request object + * @returns Whether the idempotency middleware should be skipped + */ +const shouldSkipIdempotency = (request: MiddyLikeRequest): boolean => { + return request.internal[`${IDEMPOTENCY_KEY}.skip`] === true; +}; + /** * A middy middleware to make your Lambda Handler idempotent. 
* * @example * ```typescript - * import { - * makeHandlerIdempotent, - * DynamoDBPersistenceLayer, - * } from '@aws-lambda-powertools/idempotency'; + * import { makeHandlerIdempotent } from '@aws-lambda-powertools/idempotency/middleware'; + * import { DynamoDBPersistenceLayer } from '@aws-lambda-powertools/idempotency/dynamodb'; * import middy from '@middy/core'; * - * const dynamoDBPersistenceLayer = new DynamoDBPersistenceLayer({ - * tableName: 'idempotencyTable', + * const persistenceStore = new DynamoDBPersistenceLayer({ + * tableName: 'idempotencyTable', * }); * - * const lambdaHandler = async (_event: unknown, _context: unknown) => { - * //... - * }; - * - * export const handler = middy(lambdaHandler) - * .use(makeHandlerIdempotent({ persistenceStore: dynamoDBPersistenceLayer })); + * export const handler = middy( + * async (_event: unknown, _context: unknown): Promise => { + * // your code goes here + * } + * ).use(makeHandlerIdempotent({ persistenceStore: dynamoDBPersistenceLayer })); * ``` * * @param options - Options for the idempotency middleware @@ -43,17 +100,6 @@ import { const makeHandlerIdempotent = ( options: IdempotencyLambdaHandlerOptions ): MiddlewareLikeObj => { - const idempotencyConfig = options.config - ? options.config - : new IdempotencyConfig({}); - const persistenceStore = options.persistenceStore; - persistenceStore.configure({ - config: idempotencyConfig, - }); - - // keep the flag for after and onError checks - let shouldSkipIdempotency = false; - /** * Function called before the handler is executed. * @@ -76,7 +122,16 @@ const makeHandlerIdempotent = ( request: MiddyLikeRequest, retryNo = 0 ): Promise => { + const idempotencyConfig = options.config + ? 
options.config + : new IdempotencyConfig({}); + const persistenceStore = options.persistenceStore; + persistenceStore.configure({ + config: idempotencyConfig, + }); + if ( + !idempotencyConfig.isEnabled() || IdempotencyHandler.shouldSkipIdempotency( idempotencyConfig.eventKeyJmesPath, idempotencyConfig.throwOnNoIdempotencyKey, @@ -84,10 +139,17 @@ const makeHandlerIdempotent = ( ) ) { // set the flag to skip checks in after and onError - shouldSkipIdempotency = true; + setIdempotencySkipFlag(request); return; } + + /** + * Store the persistence store in the request internal so that it can be + * used in after and onError + */ + setPersistenceStoreInRequestInternal(request, persistenceStore); + try { await persistenceStore.saveInProgress( request.event as JSONValue, @@ -129,6 +191,7 @@ const makeHandlerIdempotent = ( } } }; + /** * Function called after the handler has executed successfully. * @@ -139,9 +202,10 @@ const makeHandlerIdempotent = ( * @param request - The Middy request object */ const after = async (request: MiddyLikeRequest): Promise => { - if (shouldSkipIdempotency) { + if (shouldSkipIdempotency(request)) { return; } + const persistenceStore = getPersistenceStoreFromRequestInternal(request); try { await persistenceStore.saveSuccess( request.event as JSONValue, @@ -164,9 +228,10 @@ const makeHandlerIdempotent = ( * @param request - The Middy request object */ const onError = async (request: MiddyLikeRequest): Promise => { - if (shouldSkipIdempotency) { + if (shouldSkipIdempotency(request)) { return; } + const persistenceStore = getPersistenceStoreFromRequestInternal(request); try { await persistenceStore.deleteRecord(request.event as JSONValue); } catch (error) { @@ -177,19 +242,11 @@ const makeHandlerIdempotent = ( } }; - if (idempotencyConfig.isEnabled()) { - return { - before, - after, - onError, - }; - } else { - return { - before: () => { - return undefined; - }, - }; - } + return { + before, + after, + onError, + }; }; export { 
makeHandlerIdempotent }; diff --git a/packages/idempotency/src/persistence/BasePersistenceLayerInterface.ts b/packages/idempotency/src/persistence/BasePersistenceLayerInterface.ts index ce0d68b5d7..f4e792082a 100644 --- a/packages/idempotency/src/persistence/BasePersistenceLayerInterface.ts +++ b/packages/idempotency/src/persistence/BasePersistenceLayerInterface.ts @@ -1,10 +1,11 @@ import { IdempotencyRecord } from './IdempotencyRecord'; import type { BasePersistenceLayerOptions } from '../types/BasePersistenceLayer'; +// TODO: move this to types folder interface BasePersistenceLayerInterface { configure(options?: BasePersistenceLayerOptions): void; isPayloadValidationEnabled(): boolean; - saveInProgress(data: unknown): Promise; + saveInProgress(data: unknown, remainingTimeInMillis?: number): Promise; saveSuccess(data: unknown, result: unknown): Promise; deleteRecord(data: unknown): Promise; getRecord(data: unknown): Promise; diff --git a/packages/idempotency/tests/e2e/makeFunctionIdempotent.test.FunctionCode.ts b/packages/idempotency/tests/e2e/makeFunctionIdempotent.test.FunctionCode.ts deleted file mode 100644 index 8e947f7950..0000000000 --- a/packages/idempotency/tests/e2e/makeFunctionIdempotent.test.FunctionCode.ts +++ /dev/null @@ -1,74 +0,0 @@ -import type { Context } from 'aws-lambda'; -import { DynamoDBPersistenceLayer } from '../../src/persistence/DynamoDBPersistenceLayer'; -import { makeFunctionIdempotent } from '../../src'; -import { Logger } from '@aws-lambda-powertools/logger'; -import { IdempotencyConfig } from '../../src'; - -const IDEMPOTENCY_TABLE_NAME = - process.env.IDEMPOTENCY_TABLE_NAME || 'table_name'; -const dynamoDBPersistenceLayer = new DynamoDBPersistenceLayer({ - tableName: IDEMPOTENCY_TABLE_NAME, -}); - -const ddbPersistenceLayerCustomized = new DynamoDBPersistenceLayer({ - tableName: IDEMPOTENCY_TABLE_NAME, - dataAttr: 'dataattr', - keyAttr: 'customId', - expiryAttr: 'expiryattr', - statusAttr: 'statusattr', - inProgressExpiryAttr: 
'inprogressexpiryattr', - staticPkValue: 'staticpkvalue', - validationKeyAttr: 'validationkeyattr', -}); - -interface EventRecords { - records: Record[]; -} - -const logger = new Logger(); - -const processRecord = (record: Record): string => { - logger.info(`Got test event: ${JSON.stringify(record)}`); - - return 'Processing done: ' + record['foo']; -}; - -const idempotencyConfig = new IdempotencyConfig({}); - -const processIdempotently = makeFunctionIdempotent(processRecord, { - persistenceStore: dynamoDBPersistenceLayer, - dataKeywordArgument: 'foo', - config: idempotencyConfig, -}); - -export const handler = async ( - _event: EventRecords, - _context: Context -): Promise => { - idempotencyConfig.registerLambdaContext(_context); - for (const record of _event.records) { - const result = await processIdempotently(record); - logger.info(result.toString()); - } - - return Promise.resolve(); -}; - -const processIdempotentlyCustomized = makeFunctionIdempotent(processRecord, { - persistenceStore: ddbPersistenceLayerCustomized, - dataKeywordArgument: 'foo', - config: idempotencyConfig, -}); - -export const handlerCustomized = async ( - _event: EventRecords, - _context: Context -): Promise => { - idempotencyConfig.registerLambdaContext(_context); - for (const record of _event.records) { - const result = await processIdempotentlyCustomized(record); - logger.info(result.toString()); - } - - return Promise.resolve(); -}; diff --git a/packages/idempotency/tests/e2e/makeFunctionIdempotent.test.ts b/packages/idempotency/tests/e2e/makeFunctionIdempotent.test.ts deleted file mode 100644 index cd4344b024..0000000000 --- a/packages/idempotency/tests/e2e/makeFunctionIdempotent.test.ts +++ /dev/null @@ -1,203 +0,0 @@ -/** - * Test makeFunctionIdempotent - * - * @group e2e/idempotency - */ -import { - generateUniqueName, - invokeFunction, - isValidRuntimeKey, -} from '../../../commons/tests/utils/e2eUtils'; -import { - RESOURCE_NAME_PREFIX, - SETUP_TIMEOUT, - TEARDOWN_TIMEOUT, - 
TEST_CASE_TIMEOUT, -} from './constants'; -import { v4 } from 'uuid'; -import { App, Stack } from 'aws-cdk-lib'; -import { DynamoDBClient } from '@aws-sdk/client-dynamodb'; -import { createHash } from 'node:crypto'; -import { - deployStack, - destroyStack, -} from '../../../commons/tests/utils/cdk-cli'; -import { GetCommand, ScanCommand } from '@aws-sdk/lib-dynamodb'; -import { createIdempotencyResources } from '../helpers/idempotencyUtils'; - -const runtime: string = process.env.RUNTIME || 'nodejs18x'; - -if (!isValidRuntimeKey(runtime)) { - throw new Error(`Invalid runtime key value: ${runtime}`); -} -const uuid = v4(); -const stackName = generateUniqueName( - RESOURCE_NAME_PREFIX, - uuid, - runtime, - 'makeFnIdempotent' -); -const makeFunctionIdepmpotentFile = - 'makeFunctionIdempotent.test.FunctionCode.ts'; - -const app = new App(); - -const ddb = new DynamoDBClient({ region: 'eu-west-1' }); -const stack = new Stack(app, stackName); - -const functionNameDefault = generateUniqueName( - RESOURCE_NAME_PREFIX, - uuid, - runtime, - 'default' -); -const ddbTableNameDefault = stackName + '-default-table'; -createIdempotencyResources( - stack, - runtime, - ddbTableNameDefault, - makeFunctionIdepmpotentFile, - functionNameDefault, - 'handler' -); - -const functionNameCustom = generateUniqueName( - RESOURCE_NAME_PREFIX, - uuid, - runtime, - 'custom' -); -const ddbTableNameCustom = stackName + '-custom-table'; -createIdempotencyResources( - stack, - runtime, - ddbTableNameCustom, - makeFunctionIdepmpotentFile, - functionNameCustom, - 'handlerCustomized', - 'customId' -); - -const functionNameKeywordArg = generateUniqueName( - RESOURCE_NAME_PREFIX, - uuid, - runtime, - 'keywordarg' -); -const ddbTableNameKeywordArg = stackName + '-keywordarg-table'; -createIdempotencyResources( - stack, - runtime, - ddbTableNameKeywordArg, - makeFunctionIdepmpotentFile, - functionNameKeywordArg, - 'handlerWithKeywordArgument' -); - -describe('Idempotency e2e test function wrapper, default 
settings', () => { - beforeAll(async () => { - await deployStack(app, stack); - }, SETUP_TIMEOUT); - - it( - 'when called twice, it returns the same result', - async () => { - const payload = { - records: [ - { id: 1, foo: 'bar' }, - { id: 2, foo: 'baz' }, - { id: 3, foo: 'bar' }, - ], - }; - const invokeStart = Date.now(); - await invokeFunction( - functionNameDefault, - 2, - 'SEQUENTIAL', - payload, - false - ); - - const payloadHashFirst = createHash('md5') - .update(JSON.stringify('bar')) - .digest('base64'); - const payloadHashSecond = createHash('md5') - .update(JSON.stringify('baz')) - .digest('base64'); - - const result = await ddb.send( - new ScanCommand({ TableName: ddbTableNameDefault }) - ); - expect(result?.Items?.length).toEqual(2); - - const resultFirst = await ddb.send( - new GetCommand({ - TableName: ddbTableNameDefault, - Key: { id: `${functionNameDefault}#${payloadHashFirst}` }, - }) - ); - expect(resultFirst?.Item?.data).toEqual('Processing done: bar'); - expect(resultFirst?.Item?.expiration).toBeGreaterThan(Date.now() / 1000); - expect(resultFirst?.Item?.in_progress_expiration).toBeGreaterThan( - invokeStart - ); - expect(resultFirst?.Item?.status).toEqual('COMPLETED'); - - const resultSecond = await ddb.send( - new GetCommand({ - TableName: ddbTableNameDefault, - Key: { id: `${functionNameDefault}#${payloadHashSecond}` }, - }) - ); - expect(resultSecond?.Item?.data).toEqual('Processing done: baz'); - expect(resultSecond?.Item?.expiration).toBeGreaterThan(Date.now() / 1000); - expect(resultSecond?.Item?.in_progress_expiration).toBeGreaterThan( - invokeStart - ); - expect(resultSecond?.Item?.status).toEqual('COMPLETED'); - }, - TEST_CASE_TIMEOUT - ); - - test( - 'when called with customized function wrapper, it creates ddb entry with custom attributes', - async () => { - const payload = { - records: [ - { id: 1, foo: 'bar' }, - { id: 2, foo: 'baq' }, - { id: 3, foo: 'bar' }, - ], - }; - const payloadHash = 
createHash('md5').update('"bar"').digest('base64'); - - const invocationLogsCustmozed = await invokeFunction( - functionNameCustom, - 2, - 'SEQUENTIAL', - payload, - false - ); - const result = await ddb.send( - new GetCommand({ - TableName: ddbTableNameCustom, - Key: { customId: `${functionNameCustom}#${payloadHash}` }, - }) - ); - console.log(result); - expect(result?.Item?.dataattr).toEqual('Processing done: bar'); - expect(result?.Item?.statusattr).toEqual('COMPLETED'); - expect(result?.Item?.expiryattr).toBeGreaterThan(Date.now() / 1000); - expect(invocationLogsCustmozed[0].getFunctionLogs().toString()).toContain( - 'Got test event' - ); - }, - TEST_CASE_TIMEOUT - ); - - afterAll(async () => { - if (!process.env.DISABLE_TEARDOWN) { - await destroyStack(app, stack); - } - }, TEARDOWN_TIMEOUT); -}); diff --git a/packages/idempotency/tests/e2e/makeHandlerIdempotent.test.FunctionCode.ts b/packages/idempotency/tests/e2e/makeHandlerIdempotent.test.FunctionCode.ts new file mode 100644 index 0000000000..510dcd399e --- /dev/null +++ b/packages/idempotency/tests/e2e/makeHandlerIdempotent.test.FunctionCode.ts @@ -0,0 +1,119 @@ +import type { Context } from 'aws-lambda'; +import { DynamoDBPersistenceLayer } from '../../src/persistence/DynamoDBPersistenceLayer'; +import { makeHandlerIdempotent } from '../../src/middleware'; +import { IdempotencyConfig } from '../../src'; +import { Logger } from '@aws-lambda-powertools/logger'; +import middy from '@middy/core'; + +const IDEMPOTENCY_TABLE_NAME = + process.env.IDEMPOTENCY_TABLE_NAME || 'table_name'; + +const dynamoDBPersistenceLayer = new DynamoDBPersistenceLayer({ + tableName: IDEMPOTENCY_TABLE_NAME, +}); +const logger = new Logger(); + +/** + * Test handler with sequential execution. 
+ */ +export const handler = middy( + async (event: { foo: string }, context: Context) => { + logger.addContext(context); + logger.info(`foo`, { details: event.foo }); + + return event.foo; + } +).use( + makeHandlerIdempotent({ + persistenceStore: dynamoDBPersistenceLayer, + }) +); + +/** + * Test handler with parallel execution. + * + * We put a 1.5s delay in the handler to ensure that it doesn't return + * before the second call is made. This way the slowest call will be + * rejected and the fastest will be processed. + */ +export const handlerParallel = middy( + async (event: { foo: string }, context: Context) => { + logger.addContext(context); + + await new Promise((resolve) => setTimeout(resolve, 1500)); + + logger.info('Processed event', { details: event.foo }); + + return event.foo; + } +).use( + makeHandlerIdempotent({ + persistenceStore: dynamoDBPersistenceLayer, + }) +); + +/** + * Test handler with timeout and JMESPath expression to extract the + * idempotency key. + * + * We put a 0.5s delay in the handler to ensure that it will timeout + * (timeout is set to 1s). By the time the second call is made, the + * second call is made, the first idempotency record has expired. + */ +export const handlerTimeout = middy( + async (event: { foo: string; invocation: number }, context: Context) => { + logger.addContext(context); + + if (event.invocation === 0) { + await new Promise((resolve) => setTimeout(resolve, 2000)); + } + + logger.info('Processed event', { + details: event.foo, + }); + + return { + foo: event.foo, + invocation: event.invocation, + }; + } +).use( + makeHandlerIdempotent({ + persistenceStore: dynamoDBPersistenceLayer, + config: new IdempotencyConfig({ + eventKeyJmesPath: 'foo', + }), + }) +); + +/** + * Test handler with expired idempotency record. + * + * We configure the idempotency utility to expire records after 1s. + * By the time the second call is made, the first idempotency record + * has expired. The second call will be processed. 
We include a JMESPath + * expression to extract the idempotency key (`foo`) but we return the + * invocation number as well so that we can check that the second call + * was processed by looking at the value in the stored idempotency record. + */ +export const handlerExpired = middy( + async (event: { foo: string; invocation: number }, context: Context) => { + logger.addContext(context); + + logger.info('Processed event', { details: event.foo }); + + return { + foo: event.foo, + invocation: event.invocation, + }; + } +).use( + makeHandlerIdempotent({ + persistenceStore: dynamoDBPersistenceLayer, + config: new IdempotencyConfig({ + useLocalCache: false, + expiresAfterSeconds: 1, + eventKeyJmesPath: 'foo', + }), + }) +); diff --git a/packages/idempotency/tests/e2e/makeHandlerIdempotent.test.ts b/packages/idempotency/tests/e2e/makeHandlerIdempotent.test.ts new file mode 100644 index 0000000000..f5bef822db --- /dev/null +++ b/packages/idempotency/tests/e2e/makeHandlerIdempotent.test.ts @@ -0,0 +1,390 @@ +/** + * Test makeHandlerIdempotent middleware + * + * @group e2e/idempotency/makeHandlerIdempotent + */ +import { + generateUniqueName, + invokeFunction, + isValidRuntimeKey, +} from '../../../commons/tests/utils/e2eUtils'; +import { InvocationLogs } from '../../../commons/tests/utils/InvocationLogs'; +import { + RESOURCE_NAME_PREFIX, + SETUP_TIMEOUT, + TEARDOWN_TIMEOUT, + TEST_CASE_TIMEOUT, +} from './constants'; +import { + deployStack, + destroyStack, +} from '../../../commons/tests/utils/cdk-cli'; +import { v4 } from 'uuid'; +import { App, Stack } from 'aws-cdk-lib'; +import { DynamoDBClient } from '@aws-sdk/client-dynamodb'; +import { createHash } from 'node:crypto'; +import { ScanCommand } from '@aws-sdk/lib-dynamodb'; +import { createIdempotencyResources } from '../helpers/idempotencyUtils'; + +const runtime: string = process.env.RUNTIME || 'nodejs18x'; + +if (!isValidRuntimeKey(runtime)) { + throw new Error(`Invalid runtime key value: ${runtime}`); +} + +const 
uuid = v4(); +const stackName = generateUniqueName( + RESOURCE_NAME_PREFIX, + uuid, + runtime, + 'makeFnIdempotent' +); +const makeHandlerIdempotentFile = 'makeHandlerIdempotent.test.FunctionCode.ts'; + +const app = new App(); + +const ddb = new DynamoDBClient({}); +const stack = new Stack(app, stackName); + +const testDefault = 'default-sequential'; +const functionNameDefault = generateUniqueName( + RESOURCE_NAME_PREFIX, + uuid, + runtime, + `${testDefault}-fn` +); +const ddbTableNameDefault = generateUniqueName( + RESOURCE_NAME_PREFIX, + uuid, + runtime, + `${testDefault}-table` +); +createIdempotencyResources( + stack, + runtime, + ddbTableNameDefault, + makeHandlerIdempotentFile, + functionNameDefault, + 'handler' +); + +const testDefaultParallel = 'default-parallel'; +const functionNameDefaultParallel = generateUniqueName( + RESOURCE_NAME_PREFIX, + uuid, + runtime, + `${testDefaultParallel}-fn` +); +const ddbTableNameDefaultParallel = generateUniqueName( + RESOURCE_NAME_PREFIX, + uuid, + runtime, + `${testDefaultParallel}-table` +); +createIdempotencyResources( + stack, + runtime, + ddbTableNameDefaultParallel, + makeHandlerIdempotentFile, + functionNameDefaultParallel, + 'handlerParallel' +); + +const testTimeout = 'timeout'; +const functionNameTimeout = generateUniqueName( + RESOURCE_NAME_PREFIX, + uuid, + runtime, + `${testTimeout}-fn` +); +const ddbTableNameTimeout = generateUniqueName( + RESOURCE_NAME_PREFIX, + uuid, + runtime, + `${testTimeout}-table` +); +createIdempotencyResources( + stack, + runtime, + ddbTableNameTimeout, + makeHandlerIdempotentFile, + functionNameTimeout, + 'handlerTimeout', + undefined, + 2 +); + +const testExpired = 'expired'; +const functionNameExpired = generateUniqueName( + RESOURCE_NAME_PREFIX, + uuid, + runtime, + `${testExpired}-fn` +); +const ddbTableNameExpired = generateUniqueName( + RESOURCE_NAME_PREFIX, + uuid, + runtime, + `${testExpired}-table` +); +createIdempotencyResources( + stack, + runtime, + 
ddbTableNameExpired, + makeHandlerIdempotentFile, + functionNameExpired, + 'handlerExpired', + undefined, + 2 +); + +describe(`Idempotency E2E tests, middy middleware usage for runtime ${runtime}`, () => { + beforeAll(async () => { + await deployStack(app, stack); + }, SETUP_TIMEOUT); + + test( + 'when called twice with the same payload, it returns the same result and runs the handler once', + async () => { + // Prepare + const payload = { + foo: 'bar', + }; + const payloadHash = createHash('md5') + .update(JSON.stringify(payload)) + .digest('base64'); + + // Act + const logs = await invokeFunction( + functionNameDefault, + 2, + 'SEQUENTIAL', + payload, + false + ); + const functionLogs = logs.map((log) => log.getFunctionLogs()); + + // Assess + const idempotencyRecords = await ddb.send( + new ScanCommand({ + TableName: ddbTableNameDefault, + }) + ); + expect(idempotencyRecords.Items?.length).toEqual(1); + expect(idempotencyRecords.Items?.[0].id).toEqual( + `${functionNameDefault}#${payloadHash}` + ); + expect(idempotencyRecords.Items?.[0].data).toEqual('bar'); + expect(idempotencyRecords.Items?.[0].status).toEqual('COMPLETED'); + + // During the first invocation the handler should be called, so the logs should contain 1 log + expect(functionLogs[0]).toHaveLength(1); + // We test the content of the log as well as the presence of fields from the context, this + // ensures that the all the arguments are passed to the handler when made idempotent + expect(InvocationLogs.parseFunctionLog(functionLogs[0][0])).toEqual( + expect.objectContaining({ + message: 'foo', + details: 'bar', + function_name: functionNameDefault, + }) + ); + // During the second invocation the handler should not be called, so the logs should be empty + expect(functionLogs[1]).toHaveLength(0); + }, + TEST_CASE_TIMEOUT + ); + + test( + 'when two identical requests are sent in parallel, the handler is called only once', + async () => { + // Prepare + const payload = { + foo: 'bar', + }; + const 
payloadHash = createHash('md5') + .update(JSON.stringify(payload)) + .digest('base64'); + + // Act + const logs = await invokeFunction( + functionNameDefaultParallel, + 2, + 'PARALLEL', + payload, + false + ); + const functionLogs = logs.map((log) => log.getFunctionLogs()); + + // Assess + const idempotencyRecords = await ddb.send( + new ScanCommand({ + TableName: ddbTableNameDefaultParallel, + }) + ); + expect(idempotencyRecords.Items?.length).toEqual(1); + expect(idempotencyRecords.Items?.[0].id).toEqual( + `${functionNameDefaultParallel}#${payloadHash}` + ); + expect(idempotencyRecords.Items?.[0].data).toEqual('bar'); + expect(idempotencyRecords.Items?.[0].status).toEqual('COMPLETED'); + + /** + * Since the requests are sent in parallel we don't know which one will be processed first, + * however we expect that only on of them will be processed by the handler, while the other + * one will be rejected with IdempotencyAlreadyInProgressError. + * + * We filter the logs to find which one was successful and which one failed, then we check + * that they contain the expected logs. 
+ */ + const successfulInvocationLogs = functionLogs.find( + (functionLog) => + functionLog.find((log) => log.includes('Processed event')) !== + undefined + ); + const failedInvocationLogs = functionLogs.find( + (functionLog) => + functionLog.find((log) => + log.includes('There is already an execution in progress') + ) !== undefined + ); + expect(successfulInvocationLogs).toHaveLength(1); + expect(failedInvocationLogs).toHaveLength(1); + }, + TEST_CASE_TIMEOUT + ); + + test( + 'when the function times out, the second request is processed correctly by the handler', + async () => { + // Prepare + const payload = { + foo: 'bar', + }; + const payloadHash = createHash('md5') + .update(JSON.stringify(payload.foo)) + .digest('base64'); + + // Act + const logs = await invokeFunction( + functionNameTimeout, + 2, + 'SEQUENTIAL', + payload, + true + ); + const functionLogs = logs.map((log) => log.getFunctionLogs()); + + // Assess + const idempotencyRecords = await ddb.send( + new ScanCommand({ + TableName: ddbTableNameTimeout, + }) + ); + expect(idempotencyRecords.Items?.length).toEqual(1); + expect(idempotencyRecords.Items?.[0].id).toEqual( + `${functionNameTimeout}#${payloadHash}` + ); + expect(idempotencyRecords.Items?.[0].data).toEqual({ + ...payload, + invocation: 1, + }); + expect(idempotencyRecords.Items?.[0].status).toEqual('COMPLETED'); + + // During the first invocation the function should timeout so the logs should contain 2 logs + expect(functionLogs[0]).toHaveLength(2); + expect(functionLogs[0][0]).toContain('Task timed out after'); + // During the second invocation the handler should be called and complete, so the logs should + // contain 1 log + expect(functionLogs[1]).toHaveLength(1); + expect(InvocationLogs.parseFunctionLog(functionLogs[1][0])).toEqual( + expect.objectContaining({ + message: 'Processed event', + details: 'bar', + function_name: functionNameTimeout, + }) + ); + }, + TEST_CASE_TIMEOUT + ); + + test( + 'when the idempotency record is expired, 
the second request is processed correctly by the handler', + async () => { + // Prepare + const payload = { + foo: 'bar', + }; + const payloadHash = createHash('md5') + .update(JSON.stringify(payload.foo)) + .digest('base64'); + + // Act + const logs = [ + ( + await invokeFunction( + functionNameExpired, + 1, + 'SEQUENTIAL', + { ...payload, invocation: 0 }, + false + ) + )[0], + ]; + // Wait for the idempotency record to expire + await new Promise((resolve) => setTimeout(resolve, 2000)); + logs.push( + ( + await invokeFunction( + functionNameExpired, + 1, + 'SEQUENTIAL', + { ...payload, invocation: 1 }, + false + ) + )[0] + ); + const functionLogs = logs.map((log) => log.getFunctionLogs()); + + // Assess + const idempotencyRecords = await ddb.send( + new ScanCommand({ + TableName: ddbTableNameExpired, + }) + ); + expect(idempotencyRecords.Items?.length).toEqual(1); + expect(idempotencyRecords.Items?.[0].id).toEqual( + `${functionNameExpired}#${payloadHash}` + ); + expect(idempotencyRecords.Items?.[0].data).toEqual({ + ...payload, + invocation: 1, + }); + expect(idempotencyRecords.Items?.[0].status).toEqual('COMPLETED'); + + // Both invocations should be successful and the logs should contain 1 log each + expect(functionLogs[0]).toHaveLength(1); + expect(InvocationLogs.parseFunctionLog(functionLogs[1][0])).toEqual( + expect.objectContaining({ + message: 'Processed event', + details: 'bar', + function_name: functionNameExpired, + }) + ); + // During the second invocation the handler should be called and complete, so the logs should + // contain 1 log + expect(functionLogs[1]).toHaveLength(1); + expect(InvocationLogs.parseFunctionLog(functionLogs[1][0])).toEqual( + expect.objectContaining({ + message: 'Processed event', + details: 'bar', + function_name: functionNameExpired, + }) + ); + }, + TEST_CASE_TIMEOUT + ); + + afterAll(async () => { + await destroyStack(app, stack); + }, TEARDOWN_TIMEOUT); +}); diff --git 
a/packages/idempotency/tests/e2e/makeIdempotent.test.FunctionCode.ts b/packages/idempotency/tests/e2e/makeIdempotent.test.FunctionCode.ts new file mode 100644 index 0000000000..9786ddea0e --- /dev/null +++ b/packages/idempotency/tests/e2e/makeIdempotent.test.FunctionCode.ts @@ -0,0 +1,105 @@ +import type { Context } from 'aws-lambda'; +import { DynamoDBPersistenceLayer } from '../../src/persistence/DynamoDBPersistenceLayer'; +import { makeIdempotent } from '../../src'; +import { Logger } from '@aws-lambda-powertools/logger'; +import { IdempotencyConfig } from '../../src'; + +const IDEMPOTENCY_TABLE_NAME = + process.env.IDEMPOTENCY_TABLE_NAME || 'table_name'; + +// Default persistence layer +const dynamoDBPersistenceLayer = new DynamoDBPersistenceLayer({ + tableName: IDEMPOTENCY_TABLE_NAME, +}); + +// Customized persistence layer +const ddbPersistenceLayerCustomized = new DynamoDBPersistenceLayer({ + tableName: IDEMPOTENCY_TABLE_NAME, + dataAttr: 'dataAttr', + keyAttr: 'customId', + expiryAttr: 'expiryAttr', + statusAttr: 'statusAttr', + inProgressExpiryAttr: 'inProgressExpiryAttr', + staticPkValue: 'staticPkValue', + validationKeyAttr: 'validationKeyAttr', +}); + +const logger = new Logger(); + +/** + * Test idempotent arbitrary function with default persistence layer configs. 
+ */ +const idempotencyConfig = new IdempotencyConfig({}); +const processIdempotently = makeIdempotent( + (record: Record): string => { + logger.info('Got test event', { record }); + + return `Processing done: ${record.foo}`; + }, + { + persistenceStore: dynamoDBPersistenceLayer, + config: idempotencyConfig, + } +); + +export const handlerDefault = async ( + event: { + records: Record[]; + }, + context: Context +): Promise => { + idempotencyConfig.registerLambdaContext(context); + for (const record of event.records) { + await processIdempotently(record); + } +}; + +/** + * Test idempotent arbitrary function with customized persistence layer configs + * and JMESPath expression to enable payload validation. + */ +const idempotencyConfigWithSelection = new IdempotencyConfig({ + payloadValidationJmesPath: 'foo', +}); +const processIdempotentlyCustomized = makeIdempotent( + (baz: number, record: Record): Record => { + logger.info('Got test event', { baz, record }); + + return record; + }, + { + persistenceStore: ddbPersistenceLayerCustomized, + config: idempotencyConfigWithSelection, + dataIndexArgument: 1, + } +); + +export const handlerCustomized = async ( + event: { + records: Record[]; + }, + context: Context +): Promise => { + idempotencyConfigWithSelection.registerLambdaContext(context); + for (const [idx, record] of event.records.entries()) { + await processIdempotentlyCustomized(idx, record); + } +}; + +/** + * Test idempotent Lambda handler with JMESPath expression to extract event key. 
+ */ +export const handlerLambda = makeIdempotent( + async (event: { foo: string }, context: Context) => { + logger.addContext(context); + logger.info(`foo`, { details: event.foo }); + + return event.foo; + }, + { + persistenceStore: dynamoDBPersistenceLayer, + config: new IdempotencyConfig({ + eventKeyJmesPath: 'foo', + }), + } +); diff --git a/packages/idempotency/tests/e2e/makeIdempotent.test.ts b/packages/idempotency/tests/e2e/makeIdempotent.test.ts new file mode 100644 index 0000000000..1906a8a16d --- /dev/null +++ b/packages/idempotency/tests/e2e/makeIdempotent.test.ts @@ -0,0 +1,336 @@ +/** + * Test makeIdempotent function + * + * @group e2e/idempotency/makeIdempotent + */ +import { + generateUniqueName, + invokeFunction, + isValidRuntimeKey, +} from '../../../commons/tests/utils/e2eUtils'; +import { + RESOURCE_NAME_PREFIX, + SETUP_TIMEOUT, + TEARDOWN_TIMEOUT, + TEST_CASE_TIMEOUT, +} from './constants'; +import { v4 } from 'uuid'; +import { App, Stack } from 'aws-cdk-lib'; +import { DynamoDBClient } from '@aws-sdk/client-dynamodb'; +import { createHash } from 'node:crypto'; +import { + deployStack, + destroyStack, +} from '../../../commons/tests/utils/cdk-cli'; +import { ScanCommand } from '@aws-sdk/lib-dynamodb'; +import { createIdempotencyResources } from '../helpers/idempotencyUtils'; +import { InvocationLogs } from '@aws-lambda-powertools/commons/tests/utils/InvocationLogs'; + +const runtime: string = process.env.RUNTIME || 'nodejs18x'; + +if (!isValidRuntimeKey(runtime)) { + throw new Error(`Invalid runtime key value: ${runtime}`); +} +const uuid = v4(); +const stackName = generateUniqueName( + RESOURCE_NAME_PREFIX, + uuid, + runtime, + 'makeFnIdempotent' +); +const makeFunctionIdempotentFile = 'makeIdempotent.test.FunctionCode.ts'; + +const app = new App(); + +const ddb = new DynamoDBClient({ region: 'eu-west-1' }); +const stack = new Stack(app, stackName); + +const testDefault = 'default'; +const functionNameDefault = generateUniqueName( + 
RESOURCE_NAME_PREFIX, + uuid, + runtime, + `${testDefault}-fn` +); +const ddbTableNameDefault = generateUniqueName( + RESOURCE_NAME_PREFIX, + uuid, + runtime, + `${testDefault}-table` +); +createIdempotencyResources( + stack, + runtime, + ddbTableNameDefault, + makeFunctionIdempotentFile, + functionNameDefault, + 'handlerDefault' +); + +const testCustomConfig = 'customConfig'; +const functionNameCustomConfig = generateUniqueName( + RESOURCE_NAME_PREFIX, + uuid, + runtime, + `${testCustomConfig}-fn` +); +const ddbTableNameCustomConfig = generateUniqueName( + RESOURCE_NAME_PREFIX, + uuid, + runtime, + `${testCustomConfig}-fn` +); +createIdempotencyResources( + stack, + runtime, + ddbTableNameCustomConfig, + makeFunctionIdempotentFile, + functionNameCustomConfig, + 'handlerCustomized', + 'customId' +); + +const testLambdaHandler = 'handler'; +const functionNameLambdaHandler = generateUniqueName( + RESOURCE_NAME_PREFIX, + uuid, + runtime, + `${testLambdaHandler}-fn` +); +const ddbTableNameLambdaHandler = generateUniqueName( + RESOURCE_NAME_PREFIX, + uuid, + runtime, + `${testLambdaHandler}-table` +); +createIdempotencyResources( + stack, + runtime, + ddbTableNameLambdaHandler, + makeFunctionIdempotentFile, + functionNameLambdaHandler, + 'handlerLambda' +); + +describe(`Idempotency E2E tests, wrapper function usage for runtime`, () => { + beforeAll(async () => { + await deployStack(app, stack); + }, SETUP_TIMEOUT); + + it( + 'when called twice with the same payload, it returns the same result', + async () => { + // Prepare + const payload = { + records: [ + { id: 1, foo: 'bar' }, + { id: 2, foo: 'baz' }, + { id: 1, foo: 'bar' }, + ], + }; + const payloadHashes = payload.records.map((record) => + createHash('md5').update(JSON.stringify(record)).digest('base64') + ); + + // Act + const logs = await invokeFunction( + functionNameDefault, + 2, + 'SEQUENTIAL', + payload, + false + ); + const functionLogs = logs.map((log) => log.getFunctionLogs()); + + // Assess + const 
idempotencyRecords = await ddb.send( + new ScanCommand({ + TableName: ddbTableNameDefault, + }) + ); + // Since records 1 and 3 have the same payload, only 2 records should be created + expect(idempotencyRecords?.Items?.length).toEqual(2); + const idempotencyRecordsItems = idempotencyRecords.Items?.sort((a, b) => + a.expiration > b.expiration ? 1 : -1 + ); + + expect(idempotencyRecordsItems?.[0]).toStrictEqual({ + id: `${functionNameDefault}#${payloadHashes[0]}`, + data: 'Processing done: bar', + status: 'COMPLETED', + expiration: expect.any(Number), + in_progress_expiration: expect.any(Number), + }); + + expect(idempotencyRecordsItems?.[1]).toStrictEqual({ + id: `${functionNameDefault}#${payloadHashes[1]}`, + data: 'Processing done: baz', + status: 'COMPLETED', + expiration: expect.any(Number), + in_progress_expiration: expect.any(Number), + }); + + expect(functionLogs[0]).toHaveLength(2); + }, + TEST_CASE_TIMEOUT + ); + + test( + 'when called with customized function wrapper, it creates ddb entry with custom attributes', + async () => { + // Prepare + const payload = { + records: [ + { id: 1, foo: 'bar' }, + { id: 2, foo: 'baq' }, + { id: 3, foo: 'bar' }, + ], + }; + const payloadHashes = payload.records.map((record) => + createHash('md5').update(JSON.stringify(record)).digest('base64') + ); + const validationHashes = payload.records.map((record) => + createHash('md5').update(JSON.stringify(record.foo)).digest('base64') + ); + + // Act + const logs = await invokeFunction( + functionNameCustomConfig, + 2, + 'SEQUENTIAL', + payload, + false + ); + const functionLogs = logs.map((log) => log.getFunctionLogs()); + + // Assess + const idempotencyRecords = await ddb.send( + new ScanCommand({ + TableName: ddbTableNameCustomConfig, + }) + ); + /** + * Each record should have a corresponding entry in the persistence store, + * if so then we sort the entries by expiry time and compare them to the + * expected values. 
Expiry times should be in the same order as the + * payload records. + */ + expect(idempotencyRecords.Items?.length).toEqual(3); + const idempotencyRecordsItems = idempotencyRecords.Items?.sort((a, b) => + a.expiryAttr > b.expiryAttr ? 1 : -1 + ); + + expect(idempotencyRecordsItems?.[0]).toStrictEqual({ + customId: `${functionNameCustomConfig}#${payloadHashes[0]}`, + dataAttr: payload.records[0], + statusAttr: 'COMPLETED', + expiryAttr: expect.any(Number), + inProgressExpiryAttr: expect.any(Number), + validationKeyAttr: validationHashes[0], + }); + + expect(idempotencyRecordsItems?.[1]).toStrictEqual({ + customId: `${functionNameCustomConfig}#${payloadHashes[1]}`, + dataAttr: payload.records[1], + statusAttr: 'COMPLETED', + expiryAttr: expect.any(Number), + inProgressExpiryAttr: expect.any(Number), + validationKeyAttr: validationHashes[1], + }); + + expect(idempotencyRecordsItems?.[2]).toStrictEqual({ + customId: `${functionNameCustomConfig}#${payloadHashes[2]}`, + dataAttr: payload.records[2], + statusAttr: 'COMPLETED', + expiryAttr: expect.any(Number), + inProgressExpiryAttr: expect.any(Number), + validationKeyAttr: validationHashes[2], + }); + + // During the first invocation, the processing function should have been called 3 times (once for each record) + expect(functionLogs[0]).toHaveLength(3); + expect(InvocationLogs.parseFunctionLog(functionLogs[0][0])).toEqual( + expect.objectContaining({ + baz: 0, // index of recursion in handler, assess that all function arguments are preserved + record: payload.records[0], + message: 'Got test event', + }) + ); + expect(InvocationLogs.parseFunctionLog(functionLogs[0][1])).toEqual( + expect.objectContaining({ + baz: 1, + record: payload.records[1], + message: 'Got test event', + }) + ); + expect(InvocationLogs.parseFunctionLog(functionLogs[0][2])).toEqual( + expect.objectContaining({ + baz: 2, + record: payload.records[2], + message: 'Got test event', + }) + ); + + // During the second invocation, the processing function 
should have been called 0 times (all records are idempotent) + expect(functionLogs[1]).toHaveLength(0); + }, + TEST_CASE_TIMEOUT + ); + + test( + 'when called twice with the same payload, it returns the same result and runs the handler once', + async () => { + // Prepare + const payload = { + foo: 'bar', + }; + const payloadHash = createHash('md5') + .update(JSON.stringify(payload.foo)) + .digest('base64'); + + // Act + const logs = await invokeFunction( + functionNameLambdaHandler, + 2, + 'SEQUENTIAL', + payload, + true + ); + const functionLogs = logs.map((log) => log.getFunctionLogs()); + + // Assess + const idempotencyRecords = await ddb.send( + new ScanCommand({ + TableName: ddbTableNameLambdaHandler, + }) + ); + expect(idempotencyRecords.Items?.length).toEqual(1); + expect(idempotencyRecords.Items?.[0].id).toEqual( + `${functionNameLambdaHandler}#${payloadHash}` + ); + expect(idempotencyRecords.Items?.[0].data).toEqual('bar'); + expect(idempotencyRecords.Items?.[0].status).toEqual('COMPLETED'); + + // During the first invocation the handler should be called, so the logs should contain 1 log + expect(functionLogs[0]).toHaveLength(1); + // We test the content of the log as well as the presence of fields from the context, this + // ensures that the all the arguments are passed to the handler when made idempotent + expect(InvocationLogs.parseFunctionLog(functionLogs[0][0])).toEqual( + expect.objectContaining({ + message: 'foo', + details: 'bar', + function_name: functionNameLambdaHandler, + }) + ); + // During the second invocation the handler should not be called, so the logs should be empty + expect(functionLogs[1]).toHaveLength(0); + }, + TEST_CASE_TIMEOUT + ); + + afterAll(async () => { + if (!process.env.DISABLE_TEARDOWN) { + await destroyStack(app, stack); + } + }, TEARDOWN_TIMEOUT); +}); diff --git a/packages/idempotency/tests/helpers/idempotencyUtils.ts b/packages/idempotency/tests/helpers/idempotencyUtils.ts index 46e328f76a..aa3612d853 100644 --- 
a/packages/idempotency/tests/helpers/idempotencyUtils.ts +++ b/packages/idempotency/tests/helpers/idempotencyUtils.ts @@ -5,6 +5,7 @@ import { NodejsFunction } from 'aws-cdk-lib/aws-lambda-nodejs'; import { TEST_RUNTIMES } from '../../../commons/tests/utils/e2eUtils'; import { BasePersistenceLayer } from '../../src/persistence'; import path from 'path'; +import { RetentionDays } from 'aws-cdk-lib/aws-logs'; export const createIdempotencyResources = ( stack: Stack, @@ -13,7 +14,8 @@ export const createIdempotencyResources = ( pathToFunction: string, functionName: string, handler: string, - ddbPkId?: string + ddbPkId?: string, + timeout?: number ): void => { const uniqueTableId = ddbTableName + v4().substring(0, 5); const ddbTable = new Table(stack, uniqueTableId, { @@ -31,12 +33,13 @@ export const createIdempotencyResources = ( runtime: TEST_RUNTIMES[runtime], functionName: functionName, entry: path.join(__dirname, `../e2e/${pathToFunction}`), - timeout: Duration.seconds(30), + timeout: Duration.seconds(timeout || 30), handler: handler, environment: { IDEMPOTENCY_TABLE_NAME: ddbTableName, POWERTOOLS_LOGGER_LOG_EVENT: 'true', }, + logRetention: RetentionDays.ONE_DAY, }); ddbTable.grantReadWriteData(nodeJsFunction); diff --git a/packages/idempotency/tests/unit/makeIdempotent.test.ts b/packages/idempotency/tests/unit/makeIdempotent.test.ts index 0eb229e002..1fa880909e 100644 --- a/packages/idempotency/tests/unit/makeIdempotent.test.ts +++ b/packages/idempotency/tests/unit/makeIdempotent.test.ts @@ -1,5 +1,5 @@ /** - * Test Function Wrapper + * Test makeIdempotent Function Wrapper * * @group unit/idempotency/makeIdempotent */ From ea503450d395373a88a01841f48ded0a1b42fbed Mon Sep 17 00:00:00 2001 From: erikayao93 Date: Mon, 10 Jul 2023 16:51:34 +0000 Subject: [PATCH 10/24] Removed unnecessary type casting --- .../batch/tests/unit/BatchProcessor.test.ts | 123 +++++------------- 1 file changed, 33 insertions(+), 90 deletions(-) diff --git 
a/packages/batch/tests/unit/BatchProcessor.test.ts b/packages/batch/tests/unit/BatchProcessor.test.ts index 4bdb0a95f9..dd0b9383cb 100644 --- a/packages/batch/tests/unit/BatchProcessor.test.ts +++ b/packages/batch/tests/unit/BatchProcessor.test.ts @@ -4,7 +4,6 @@ * @group unit/batch/class/batchprocessor */ import { BatchProcessingError, BatchProcessor, EventType } from '../../src'; -import { DynamoDBRecord, KinesisStreamRecord, SQSRecord } from 'aws-lambda'; import { sqsEventFactory, kinesisEventFactory, @@ -46,8 +45,8 @@ describe('Class: BatchProcessor', () => { // Assess expect(processedMessages).toStrictEqual([ - ['success', (firstRecord as SQSRecord).body, firstRecord], - ['success', (secondRecord as SQSRecord).body, secondRecord], + ['success', firstRecord.body, firstRecord], + ['success', secondRecord.body, secondRecord], ]); }); @@ -66,14 +65,14 @@ describe('Class: BatchProcessor', () => { // Assess expect(processedMessages[1]).toStrictEqual([ 'success', - (secondRecord as SQSRecord).body, + secondRecord.body, secondRecord, ]); expect(processor.failureMessages.length).toBe(2); expect(processor.response()).toStrictEqual({ batchItemFailures: [ - { itemIdentifier: (firstRecord as SQSRecord).messageId }, - { itemIdentifier: (thirdRecord as SQSRecord).messageId }, + { itemIdentifier: firstRecord.messageId }, + { itemIdentifier: thirdRecord.messageId }, ], }); }); @@ -109,12 +108,12 @@ describe('Class: BatchProcessor', () => { // Assess expect(processedMessages).toStrictEqual([ - ['success', (firstRecord as SQSRecord).body, firstRecord], - ['success', (secondRecord as SQSRecord).body, secondRecord], + ['success', firstRecord.body, firstRecord], + ['success', secondRecord.body, secondRecord], ]); }); - test('Batch processing SQS records with failures', async () => { + test('Batch processing SQS records with some failures', async () => { // Prepare const firstRecord = sqsEventFactory('failure'); const secondRecord = sqsEventFactory('success'); @@ -129,14 +128,14 @@ 
describe('Class: BatchProcessor', () => { // Assess expect(processedMessages[1]).toStrictEqual([ 'success', - (secondRecord as SQSRecord).body, + secondRecord.body, secondRecord, ]); expect(processor.failureMessages.length).toBe(2); expect(processor.response()).toStrictEqual({ batchItemFailures: [ - { itemIdentifier: (firstRecord as SQSRecord).messageId }, - { itemIdentifier: (thirdRecord as SQSRecord).messageId }, + { itemIdentifier: firstRecord.messageId }, + { itemIdentifier: thirdRecord.messageId }, ], }); }); @@ -174,20 +173,12 @@ describe('Class: BatchProcessor', () => { // Assess expect(processedMessages).toStrictEqual([ - [ - 'success', - (firstRecord as KinesisStreamRecord).kinesis.data, - firstRecord, - ], - [ - 'success', - (secondRecord as KinesisStreamRecord).kinesis.data, - secondRecord, - ], + ['success', firstRecord.kinesis.data, firstRecord], + ['success', secondRecord.kinesis.data, secondRecord], ]); }); - test('Batch processing Kinesis records with failures', async () => { + test('Batch processing Kinesis records with some failures', async () => { // Prepare const firstRecord = kinesisEventFactory('failure'); const secondRecord = kinesisEventFactory('success'); @@ -202,20 +193,14 @@ describe('Class: BatchProcessor', () => { // Assess expect(processedMessages[1]).toStrictEqual([ 'success', - (secondRecord as KinesisStreamRecord).kinesis.data, + secondRecord.kinesis.data, secondRecord, ]); expect(processor.failureMessages.length).toBe(2); expect(processor.response()).toStrictEqual({ batchItemFailures: [ - { - itemIdentifier: (firstRecord as KinesisStreamRecord).kinesis - .sequenceNumber, - }, - { - itemIdentifier: (thirdRecord as KinesisStreamRecord).kinesis - .sequenceNumber, - }, + { itemIdentifier: firstRecord.kinesis.sequenceNumber }, + { itemIdentifier: thirdRecord.kinesis.sequenceNumber }, ], }); }); @@ -252,20 +237,12 @@ describe('Class: BatchProcessor', () => { // Assess expect(processedMessages).toStrictEqual([ - [ - 'success', - 
(firstRecord as KinesisStreamRecord).kinesis.data, - firstRecord, - ], - [ - 'success', - (secondRecord as KinesisStreamRecord).kinesis.data, - secondRecord, - ], + ['success', firstRecord.kinesis.data, firstRecord], + ['success', secondRecord.kinesis.data, secondRecord], ]); }); - test('Batch processing Kinesis records with failures', async () => { + test('Batch processing Kinesis records with some failures', async () => { // Prepare const firstRecord = kinesisEventFactory('failure'); const secondRecord = kinesisEventFactory('success'); @@ -280,20 +257,14 @@ describe('Class: BatchProcessor', () => { // Assess expect(processedMessages[1]).toStrictEqual([ 'success', - (secondRecord as KinesisStreamRecord).kinesis.data, + secondRecord.kinesis.data, secondRecord, ]); expect(processor.failureMessages.length).toBe(2); expect(processor.response()).toStrictEqual({ batchItemFailures: [ - { - itemIdentifier: (firstRecord as KinesisStreamRecord).kinesis - .sequenceNumber, - }, - { - itemIdentifier: (thirdRecord as KinesisStreamRecord).kinesis - .sequenceNumber, - }, + { itemIdentifier: firstRecord.kinesis.sequenceNumber }, + { itemIdentifier: thirdRecord.kinesis.sequenceNumber }, ], }); }); @@ -331,16 +302,8 @@ describe('Class: BatchProcessor', () => { // Assess expect(processedMessages).toStrictEqual([ - [ - 'success', - (firstRecord as DynamoDBRecord).dynamodb?.NewImage?.Message, - firstRecord, - ], - [ - 'success', - (secondRecord as DynamoDBRecord).dynamodb?.NewImage?.Message, - secondRecord, - ], + ['success', firstRecord.dynamodb?.NewImage?.Message, firstRecord], + ['success', secondRecord.dynamodb?.NewImage?.Message, secondRecord], ]); }); @@ -359,20 +322,14 @@ describe('Class: BatchProcessor', () => { // Assess expect(processedMessages[1]).toStrictEqual([ 'success', - (secondRecord as DynamoDBRecord).dynamodb?.NewImage?.Message, + secondRecord.dynamodb?.NewImage?.Message, secondRecord, ]); expect(processor.failureMessages.length).toBe(2); 
expect(processor.response()).toStrictEqual({ batchItemFailures: [ - { - itemIdentifier: (firstRecord as DynamoDBRecord).dynamodb - ?.SequenceNumber, - }, - { - itemIdentifier: (thirdRecord as DynamoDBRecord).dynamodb - ?.SequenceNumber, - }, + { itemIdentifier: firstRecord.dynamodb?.SequenceNumber }, + { itemIdentifier: thirdRecord.dynamodb?.SequenceNumber }, ], }); }); @@ -410,16 +367,8 @@ describe('Class: BatchProcessor', () => { // Assess expect(processedMessages).toStrictEqual([ - [ - 'success', - (firstRecord as DynamoDBRecord).dynamodb?.NewImage?.Message, - firstRecord, - ], - [ - 'success', - (secondRecord as DynamoDBRecord).dynamodb?.NewImage?.Message, - secondRecord, - ], + ['success', firstRecord.dynamodb?.NewImage?.Message, firstRecord], + ['success', secondRecord.dynamodb?.NewImage?.Message, secondRecord], ]); }); @@ -438,20 +387,14 @@ describe('Class: BatchProcessor', () => { // Assess expect(processedMessages[1]).toStrictEqual([ 'success', - (secondRecord as DynamoDBRecord).dynamodb?.NewImage?.Message, + secondRecord.dynamodb?.NewImage?.Message, secondRecord, ]); expect(processor.failureMessages.length).toBe(2); expect(processor.response()).toStrictEqual({ batchItemFailures: [ - { - itemIdentifier: (firstRecord as DynamoDBRecord).dynamodb - ?.SequenceNumber, - }, - { - itemIdentifier: (thirdRecord as DynamoDBRecord).dynamodb - ?.SequenceNumber, - }, + { itemIdentifier: firstRecord.dynamodb?.SequenceNumber }, + { itemIdentifier: thirdRecord.dynamodb?.SequenceNumber }, ], }); }); From eaee1758d5bf1d87f7434216981666bda5161677 Mon Sep 17 00:00:00 2001 From: erikayao93 Date: Mon, 10 Jul 2023 17:11:18 +0000 Subject: [PATCH 11/24] Moved exports for handlers and factories --- packages/batch/tests/helpers/factories.ts | 8 +++++--- packages/batch/tests/helpers/handlers.ts | 25 +++++++++++++++-------- 2 files changed, 21 insertions(+), 12 deletions(-) diff --git a/packages/batch/tests/helpers/factories.ts b/packages/batch/tests/helpers/factories.ts index 
ff3d5b5a69..52d789d43a 100644 --- a/packages/batch/tests/helpers/factories.ts +++ b/packages/batch/tests/helpers/factories.ts @@ -1,7 +1,7 @@ import { DynamoDBRecord, KinesisStreamRecord, SQSRecord } from 'aws-lambda'; import { v4 } from 'uuid'; -export const sqsEventFactory = (body: string): SQSRecord => { +const sqsEventFactory = (body: string): SQSRecord => { return { messageId: v4(), receiptHandle: 'AQEBwJnKyrHigUMZj6rYigCgxlaS3SLy0a', @@ -20,7 +20,7 @@ export const sqsEventFactory = (body: string): SQSRecord => { }; }; -export const kinesisEventFactory = (body: string): KinesisStreamRecord => { +const kinesisEventFactory = (body: string): KinesisStreamRecord => { let seq = ''; for (let i = 0; i < 52; i++) { seq = seq + Math.floor(Math.random() * 10); @@ -45,7 +45,7 @@ export const kinesisEventFactory = (body: string): KinesisStreamRecord => { }; }; -export const dynamodbEventFactory = (body: string): DynamoDBRecord => { +const dynamodbEventFactory = (body: string): DynamoDBRecord => { let seq = ''; for (let i = 0; i < 10; i++) { seq = seq + Math.floor(Math.random() * 10); @@ -67,3 +67,5 @@ export const dynamodbEventFactory = (body: string): DynamoDBRecord => { eventSource: 'aws:dynamodb', }; }; + +export { sqsEventFactory, kinesisEventFactory, dynamodbEventFactory }; diff --git a/packages/batch/tests/helpers/handlers.ts b/packages/batch/tests/helpers/handlers.ts index 3bacd04a1a..7f4dd933d3 100644 --- a/packages/batch/tests/helpers/handlers.ts +++ b/packages/batch/tests/helpers/handlers.ts @@ -1,6 +1,6 @@ import { DynamoDBRecord, KinesisStreamRecord, SQSRecord } from 'aws-lambda'; -export const sqsRecordHandler = (record: SQSRecord): string => { +const sqsRecordHandler = (record: SQSRecord): string => { const body = record.body; if (body.includes('fail')) { throw Error('Failed to process record.'); @@ -9,9 +9,7 @@ export const sqsRecordHandler = (record: SQSRecord): string => { return body; }; -export const asyncSqsRecordHandler = async ( - record: SQSRecord 
-): Promise => { +const asyncSqsRecordHandler = async (record: SQSRecord): Promise => { const body = record.body; if (body.includes('fail')) { throw Error('Failed to process record.'); @@ -20,7 +18,7 @@ export const asyncSqsRecordHandler = async ( return body; }; -export const kinesisRecordHandler = (record: KinesisStreamRecord): string => { +const kinesisRecordHandler = (record: KinesisStreamRecord): string => { const body = record.kinesis.data; if (body.includes('fail')) { throw Error('Failed to process record.'); @@ -29,7 +27,7 @@ export const kinesisRecordHandler = (record: KinesisStreamRecord): string => { return body; }; -export const asyncKinesisRecordHandler = async ( +const asyncKinesisRecordHandler = async ( record: KinesisStreamRecord ): Promise => { const body = record.kinesis.data; @@ -40,7 +38,7 @@ export const asyncKinesisRecordHandler = async ( return body; }; -export const dynamodbRecordHandler = (record: DynamoDBRecord): object => { +const dynamodbRecordHandler = (record: DynamoDBRecord): object => { const body = record.dynamodb?.NewImage?.Message || { S: 'fail' }; if (body['S']?.includes('fail')) { throw Error('Failed to process record.'); @@ -49,13 +47,22 @@ export const dynamodbRecordHandler = (record: DynamoDBRecord): object => { return body; }; -export const asyncDynamodbRecordHandler = async ( +const asyncDynamodbRecordHandler = async ( record: DynamoDBRecord ): Promise => { - const body = (await record.dynamodb?.NewImage?.Message) || { S: 'fail' }; + const body = record.dynamodb?.NewImage?.Message || { S: 'fail' }; if (body['S']?.includes('fail')) { throw Error('Failed to process record.'); } return body; }; + +export { + sqsRecordHandler, + asyncSqsRecordHandler, + kinesisRecordHandler, + asyncKinesisRecordHandler, + dynamodbRecordHandler, + asyncDynamodbRecordHandler, +}; From 500fbe0843376c9b1e1321f4c2a94a21ebd661c6 Mon Sep 17 00:00:00 2001 From: erikayao93 Date: Mon, 10 Jul 2023 17:23:44 +0000 Subject: [PATCH 12/24] Updated imports, 
refactored randomization in factories --- packages/batch/src/constants.ts | 2 +- packages/batch/tests/helpers/factories.ts | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/packages/batch/src/constants.ts b/packages/batch/src/constants.ts index b8335520a3..b2630475a2 100644 --- a/packages/batch/src/constants.ts +++ b/packages/batch/src/constants.ts @@ -2,7 +2,7 @@ * Constants for batch processor classes */ import { DynamoDBRecord, KinesisStreamRecord, SQSRecord } from 'aws-lambda'; -import { BatchResponse, EventSourceDataClassTypes } from '.'; +import type { BatchResponse, EventSourceDataClassTypes } from '.'; enum EventType { SQS = 'SQS', diff --git a/packages/batch/tests/helpers/factories.ts b/packages/batch/tests/helpers/factories.ts index 52d789d43a..883983a849 100644 --- a/packages/batch/tests/helpers/factories.ts +++ b/packages/batch/tests/helpers/factories.ts @@ -1,4 +1,5 @@ import { DynamoDBRecord, KinesisStreamRecord, SQSRecord } from 'aws-lambda'; +import { randomInt } from 'crypto'; import { v4 } from 'uuid'; const sqsEventFactory = (body: string): SQSRecord => { @@ -23,7 +24,7 @@ const sqsEventFactory = (body: string): SQSRecord => { const kinesisEventFactory = (body: string): KinesisStreamRecord => { let seq = ''; for (let i = 0; i < 52; i++) { - seq = seq + Math.floor(Math.random() * 10); + seq = seq + randomInt(10); } return { @@ -48,7 +49,7 @@ const kinesisEventFactory = (body: string): KinesisStreamRecord => { const dynamodbEventFactory = (body: string): DynamoDBRecord => { let seq = ''; for (let i = 0; i < 10; i++) { - seq = seq + Math.floor(Math.random() * 10); + seq = seq + randomInt(10); } return { From 002ef9e92b41a853c46bf07d700bb206b8477c3e Mon Sep 17 00:00:00 2001 From: erikayao93 Date: Mon, 10 Jul 2023 17:29:37 +0000 Subject: [PATCH 13/24] Refactored EventType to be const instead of enum --- packages/batch/src/BasePartialBatchProcessor.ts | 6 +++--- packages/batch/src/constants.ts | 10 +++++----- 2 files changed, 8 
insertions(+), 8 deletions(-) diff --git a/packages/batch/src/BasePartialBatchProcessor.ts b/packages/batch/src/BasePartialBatchProcessor.ts index ecf31f7f20..fac2e253dc 100644 --- a/packages/batch/src/BasePartialBatchProcessor.ts +++ b/packages/batch/src/BasePartialBatchProcessor.ts @@ -18,13 +18,13 @@ abstract class BasePartialBatchProcessor extends BasePartialProcessor { public batchResponse: BatchResponse; - public eventType: EventType; + public eventType: keyof typeof EventType; /** * Initializes base batch processing class * @param eventType Whether this is SQS, DynamoDB stream, or Kinesis data stream event */ - public constructor(eventType: EventType) { + public constructor(eventType: keyof typeof EventType) { super(); this.eventType = eventType; this.batchResponse = DEFAULT_RESPONSE; @@ -152,7 +152,7 @@ abstract class BasePartialBatchProcessor extends BasePartialProcessor { public toBatchType( record: EventSourceDataClassTypes, - eventType: EventType + eventType: keyof typeof EventType ): SQSRecord | KinesisStreamRecord | DynamoDBRecord { return DATA_CLASS_MAPPING[eventType](record); } diff --git a/packages/batch/src/constants.ts b/packages/batch/src/constants.ts index b2630475a2..f7be1aa447 100644 --- a/packages/batch/src/constants.ts +++ b/packages/batch/src/constants.ts @@ -4,11 +4,11 @@ import { DynamoDBRecord, KinesisStreamRecord, SQSRecord } from 'aws-lambda'; import type { BatchResponse, EventSourceDataClassTypes } from '.'; -enum EventType { - SQS = 'SQS', - KinesisDataStreams = 'KinesisDataStreams', - DynamoDBStreams = 'DynamoDBStreams', -} +const EventType = { + SQS: 'SQS', + KinesisDataStreams: 'KinesisDataStreams', + DynamoDBStreams: 'DynamoDBStreams', +} as const; const DEFAULT_RESPONSE: BatchResponse = { batchItemFailures: [], From d9be091a5470be9d5a2bc062e3444212a2a31637 Mon Sep 17 00:00:00 2001 From: erikayao93 Date: Mon, 10 Jul 2023 17:51:30 +0000 Subject: [PATCH 14/24] Refactored and added documentation for errors --- 
packages/batch/src/errors.ts | 42 +++++++++++++++++++++++++----------- 1 file changed, 29 insertions(+), 13 deletions(-) diff --git a/packages/batch/src/errors.ts b/packages/batch/src/errors.ts index 40c7129530..477b5c33af 100644 --- a/packages/batch/src/errors.ts +++ b/packages/batch/src/errors.ts @@ -2,25 +2,40 @@ * Batch processing exceptions */ +/** + * Base error type for batch processing + * All errors thrown by major failures extend this base class + */ class BaseBatchProcessingError extends Error { - public childExceptions: Error[]; + public childErrors: Error[]; public msg: string; - public constructor(msg: string, childExceptions: Error[]) { + public constructor(msg: string, childErrors: Error[]) { super(msg); this.msg = msg; - this.childExceptions = childExceptions; + this.childErrors = childErrors; } - public formatExceptions(parentExceptionString: string): string { - const exceptionList: string[] = [parentExceptionString + '\n']; - - for (const exception of this.childExceptions) { - exceptionList.push(exception.message); + /** + * Generates a list of errors that were generated by the major failure + * @returns Formatted string listing all the errors that occurred + * + * @example + * When all batch records fail to be processed, this will generate a string like: + * All records failed processing. 3 individual errors logged separately below. + * ,Failed to process record. + * ,Failed to process record. + * ,Failed to process record. 
+ */ + public formatErrors(parentErrorString: string): string { + const errorList: string[] = [parentErrorString + '\n']; + + for (const error of this.childErrors) { + errorList.push(error.message + '\n'); } - return '\n' + exceptionList; + return '\n' + errorList; } } @@ -28,10 +43,11 @@ class BaseBatchProcessingError extends Error { * When all batch records failed to be processed */ class BatchProcessingError extends BaseBatchProcessingError { - public constructor(msg: string, childExceptions: Error[]) { - super(msg, childExceptions); - const parentExceptionString: string = this.message; - this.message = this.formatExceptions(parentExceptionString); + public constructor(msg: string, childErrors: Error[]) { + super(msg, childErrors); + const parentErrorString: string = this.message; + this.message = this.formatErrors(parentErrorString); + console.log(this.message); } } From 604f04d1a92600ed63fcff44f926853f904d2106 Mon Sep 17 00:00:00 2001 From: erikayao93 Date: Mon, 10 Jul 2023 17:52:29 +0000 Subject: [PATCH 15/24] Removed debugging line --- packages/batch/src/errors.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/packages/batch/src/errors.ts b/packages/batch/src/errors.ts index 477b5c33af..f319166602 100644 --- a/packages/batch/src/errors.ts +++ b/packages/batch/src/errors.ts @@ -47,7 +47,6 @@ class BatchProcessingError extends BaseBatchProcessingError { super(msg, childErrors); const parentErrorString: string = this.message; this.message = this.formatErrors(parentErrorString); - console.log(this.message); } } From 9f1696b50230af800b3df65cc02e37a937808bfd Mon Sep 17 00:00:00 2001 From: Alexander Schueren Date: Mon, 10 Jul 2023 20:56:40 +0200 Subject: [PATCH 16/24] chore(ci): add canary to layer deployment (#1593) --- .github/scripts/setup_tmp_layer_files.sh | 3 +- .../workflows/reusable_deploy_layer_stack.yml | 2 + layers/bin/layers.ts | 7 ++ layers/src/canary-stack.ts | 87 +++++++++++++++++++ .../layerPublisher.class.test.functionCode.ts | 32 ++++--- 5 files 
changed, 113 insertions(+), 18 deletions(-) create mode 100644 layers/src/canary-stack.ts diff --git a/.github/scripts/setup_tmp_layer_files.sh b/.github/scripts/setup_tmp_layer_files.sh index 77c5875734..9d6da9635a 100644 --- a/.github/scripts/setup_tmp_layer_files.sh +++ b/.github/scripts/setup_tmp_layer_files.sh @@ -6,7 +6,8 @@ npm init -y npm i \ @aws-lambda-powertools/logger@$VERSION \ @aws-lambda-powertools/metrics@$VERSION \ - @aws-lambda-powertools/tracer@$VERSION + @aws-lambda-powertools/tracer@$VERSION \ + @aws-lambda-powertools/parameters@$VERSION rm -rf node_modules/@types \ package.json \ package-lock.json diff --git a/.github/workflows/reusable_deploy_layer_stack.yml b/.github/workflows/reusable_deploy_layer_stack.yml index 86b0f5afb6..e044615f1c 100644 --- a/.github/workflows/reusable_deploy_layer_stack.yml +++ b/.github/workflows/reusable_deploy_layer_stack.yml @@ -94,6 +94,8 @@ jobs: path: ./cdk-layer-stack/* # NOTE: upload-artifact does not inherit working-directory setting. 
if-no-files-found: error retention-days: 1 + - name: CDK deploy canary + run: npm run cdk -w layer -- deploy --app cdk.out --context region=${{ matrix.region }} 'CanaryStack' --require-approval never --verbose --outputs-file cdk-outputs.json update_layer_arn_docs: needs: deploy-cdk-stack permissions: diff --git a/layers/bin/layers.ts b/layers/bin/layers.ts index 902bd6de5f..e945a14aba 100644 --- a/layers/bin/layers.ts +++ b/layers/bin/layers.ts @@ -2,6 +2,7 @@ import 'source-map-support/register'; import { App } from 'aws-cdk-lib'; import { LayerPublisherStack } from '../src/layer-publisher-stack'; +import { CanaryStack } from 'layers/src/canary-stack'; const SSM_PARAM_LAYER_ARN = '/layers/powertools-layer-arn'; @@ -12,3 +13,9 @@ new LayerPublisherStack(app, 'LayerPublisherStack', { layerName: 'AWSLambdaPowertoolsTypeScript', ssmParameterLayerArn: SSM_PARAM_LAYER_ARN, }); + +new CanaryStack(app, 'CanaryStack', { + powertoolsPackageVersion: app.node.tryGetContext('PowertoolsPackageVersion'), + ssmParameterLayerArn: SSM_PARAM_LAYER_ARN, + layerName: 'AWSLambdaPowertoolsCanaryTypeScript', +}); diff --git a/layers/src/canary-stack.ts b/layers/src/canary-stack.ts new file mode 100644 index 0000000000..4847af21d3 --- /dev/null +++ b/layers/src/canary-stack.ts @@ -0,0 +1,87 @@ +import { CustomResource, Duration, Stack, StackProps } from 'aws-cdk-lib'; +import { Construct } from 'constructs'; +import { LayerVersion, Runtime } from 'aws-cdk-lib/aws-lambda'; +import { RetentionDays } from 'aws-cdk-lib/aws-logs'; +import { v4 } from 'uuid'; +import { Effect, PolicyStatement } from 'aws-cdk-lib/aws-iam'; +import { Provider } from 'aws-cdk-lib/custom-resources'; +import { StringParameter } from 'aws-cdk-lib/aws-ssm'; +import path from 'path'; +import { NodejsFunction } from 'aws-cdk-lib/aws-lambda-nodejs'; + +export interface CanaryStackProps extends StackProps { + readonly layerName: string; + readonly powertoolsPackageVersion: string; + readonly ssmParameterLayerArn: string; 
+} + +export class CanaryStack extends Stack { + public constructor(scope: Construct, id: string, props: CanaryStackProps) { + super(scope, id, props); + const { layerName, powertoolsPackageVersion } = props; + + const suffix = v4().substring(0, 5); + + const layerArn = StringParameter.fromStringParameterAttributes( + this, + 'LayerArn', + { + parameterName: props.ssmParameterLayerArn, + } + ).stringValue; + + // lambda function + const layer = [ + LayerVersion.fromLayerVersionArn(this, 'powertools-layer', layerArn), + ]; + + const canaryFunction = new NodejsFunction(this, 'CanaryFunction', { + entry: path.join( + __dirname, + '../tests/e2e/layerPublisher.class.test.functionCode.ts' + ), + handler: 'handler', + runtime: Runtime.NODEJS_18_X, + functionName: `canary-${suffix}`, + timeout: Duration.seconds(30), + bundling: { + externalModules: [ + // don't package these modules, we want to pull them from the layer + 'aws-sdk', + '@aws-lambda-powertools/logger', + '@aws-lambda-powertools/metrics', + '@aws-lambda-powertools/tracer', + '@aws-lambda-powertools/parameters', + '@aws-lambda-powertools/commons', + ], + }, + environment: { + POWERTOOLS_SERVICE_NAME: 'canary', + POWERTOOLS_PACKAGE_VERSION: powertoolsPackageVersion, + POWERTOOLS_LAYER_NAME: layerName, + SSM_PARAMETER_LAYER_ARN: props.ssmParameterLayerArn, + }, + layers: layer, + logRetention: RetentionDays.ONE_DAY, + }); + + canaryFunction.addToRolePolicy( + new PolicyStatement({ + actions: ['ssm:GetParameter'], + resources: ['*'], + effect: Effect.ALLOW, + }) + ); + + // use custom resource to trigger the lambda function during the CFN deployment + const provider = new Provider(this, 'CanaryCustomResourceProvider', { + onEventHandler: canaryFunction, + logRetention: RetentionDays.ONE_DAY, + }); + + // random suffix forces recreation of the custom resource otherwise the custom resource will be reused from prevous deployment + new CustomResource(this, `CanaryCustomResource${suffix}`, { + serviceToken: 
provider.serviceToken, + }); + } +} diff --git a/layers/tests/e2e/layerPublisher.class.test.functionCode.ts b/layers/tests/e2e/layerPublisher.class.test.functionCode.ts index 6606efffbc..28e60424e8 100644 --- a/layers/tests/e2e/layerPublisher.class.test.functionCode.ts +++ b/layers/tests/e2e/layerPublisher.class.test.functionCode.ts @@ -2,33 +2,31 @@ import { readFileSync } from 'node:fs'; import { Logger } from '@aws-lambda-powertools/logger'; import { Metrics } from '@aws-lambda-powertools/metrics'; import { Tracer } from '@aws-lambda-powertools/tracer'; +import { SSMProvider } from '@aws-lambda-powertools/parameters/ssm'; const logger = new Logger({ logLevel: 'DEBUG', }); const metrics = new Metrics(); const tracer = new Tracer(); +new SSMProvider(); export const handler = (): void => { // Check that the packages version matches the expected one - try { - const packageJSON = JSON.parse( - readFileSync( - '/opt/nodejs/node_modules/@aws-lambda-powertools/logger/package.json', - { - encoding: 'utf8', - flag: 'r', - } - ) - ); + const packageJSON = JSON.parse( + readFileSync( + '/opt/nodejs/node_modules/@aws-lambda-powertools/logger/package.json', + { + encoding: 'utf8', + flag: 'r', + } + ) + ); - if (packageJSON.version != process.env.POWERTOOLS_PACKAGE_VERSION) { - throw new Error( - `Package version mismatch: ${packageJSON.version} != ${process.env.POWERTOOLS_PACKAGE_VERSION}` - ); - } - } catch (error) { - console.error(error); + if (packageJSON.version != process.env.POWERTOOLS_PACKAGE_VERSION) { + throw new Error( + `Package version mismatch: ${packageJSON.version} != ${process.env.POWERTOOLS_PACKAGE_VERSION}` + ); } // Check that the logger is working From 3b5ccc1fc68ca781a665862e8dcb98bdec528fb5 Mon Sep 17 00:00:00 2001 From: Andrea Amorosi Date: Tue, 11 Jul 2023 08:57:43 +0200 Subject: [PATCH 17/24] docs(idempotency): write utility docs (#1592) * docs: base docs * wip * chore: added paths to snippets tsconfig * chore: added page to docs menu * 
docs(idempotency): utility docs * highlights * chore: remove CDK mention --- .gitignore | 1 + .../idempotency/customizePersistenceLayer.ts | 35 + .../idempotency/makeHandlerIdempotent.ts | 40 + .../idempotency/makeIdempotentAnyFunction.ts | 59 ++ .../idempotency/makeIdempotentBase.ts | 38 + .../idempotency/makeIdempotentJmes.ts | 51 ++ .../makeIdempotentLambdaContext.ts | 51 ++ .../idempotency/requiredIdempotencyKey.ts | 20 + docs/snippets/idempotency/types.ts | 15 + .../idempotency/workingWithCompositeKey.ts | 30 + .../idempotency/workingWithCustomClient.ts | 32 + .../idempotency/workingWithCustomConfig.ts | 30 + .../idempotency/workingWithExceptions.ts | 59 ++ .../workingWithIdempotencyRequiredKey.ts | 36 + .../idempotency/workingWithLocalCache.ts | 35 + .../workingWithPayloadValidation.ts | 58 ++ .../workingWithRecordExpiration.ts | 35 + docs/snippets/package.json | 2 +- docs/snippets/tsconfig.json | 12 + docs/utilities/idempotency.md | 797 ++++++++++++++++++ mkdocs.yml | 1 + packages/idempotency/README.md | 2 +- 22 files changed, 1437 insertions(+), 2 deletions(-) create mode 100644 docs/snippets/idempotency/customizePersistenceLayer.ts create mode 100644 docs/snippets/idempotency/makeHandlerIdempotent.ts create mode 100644 docs/snippets/idempotency/makeIdempotentAnyFunction.ts create mode 100644 docs/snippets/idempotency/makeIdempotentBase.ts create mode 100644 docs/snippets/idempotency/makeIdempotentJmes.ts create mode 100644 docs/snippets/idempotency/makeIdempotentLambdaContext.ts create mode 100644 docs/snippets/idempotency/requiredIdempotencyKey.ts create mode 100644 docs/snippets/idempotency/types.ts create mode 100644 docs/snippets/idempotency/workingWithCompositeKey.ts create mode 100644 docs/snippets/idempotency/workingWithCustomClient.ts create mode 100644 docs/snippets/idempotency/workingWithCustomConfig.ts create mode 100644 docs/snippets/idempotency/workingWithExceptions.ts create mode 100644 
docs/snippets/idempotency/workingWithIdempotencyRequiredKey.ts create mode 100644 docs/snippets/idempotency/workingWithLocalCache.ts create mode 100644 docs/snippets/idempotency/workingWithPayloadValidation.ts create mode 100644 docs/snippets/idempotency/workingWithRecordExpiration.ts create mode 100644 docs/utilities/idempotency.md diff --git a/.gitignore b/.gitignore index 489c489c75..eab53c4c25 100644 --- a/.gitignore +++ b/.gitignore @@ -33,6 +33,7 @@ coverage # Python virtual environments (for running mkdocs locally) venv +.venv # Static documentation site generated by Mkdocs site diff --git a/docs/snippets/idempotency/customizePersistenceLayer.ts b/docs/snippets/idempotency/customizePersistenceLayer.ts new file mode 100644 index 0000000000..138da79b9c --- /dev/null +++ b/docs/snippets/idempotency/customizePersistenceLayer.ts @@ -0,0 +1,35 @@ +import { makeHandlerIdempotent } from '@aws-lambda-powertools/idempotency/middleware'; +import { DynamoDBPersistenceLayer } from '@aws-lambda-powertools/idempotency/dynamodb'; +import middy from '@middy/core'; +import type { Context } from 'aws-lambda'; +import type { Request, Response } from './types'; + +const persistenceStore = new DynamoDBPersistenceLayer({ + tableName: 'idempotencyTableName', + keyAttr: 'idempotencyKey', + expiryAttr: 'expiresAt', + inProgressExpiryAttr: 'inProgressExpiresAt', + statusAttr: 'currentStatus', + dataAttr: 'resultData', + validationKeyAttr: 'validationKey', +}); + +export const handler = middy( + async (_event: Request, _context: Context): Promise => { + try { + // ... 
create payment + + return { + paymentId: '1234567890', + message: 'success', + statusCode: 200, + }; + } catch (error) { + throw new Error('Error creating payment'); + } + } +).use( + makeHandlerIdempotent({ + persistenceStore, + }) +); diff --git a/docs/snippets/idempotency/makeHandlerIdempotent.ts b/docs/snippets/idempotency/makeHandlerIdempotent.ts new file mode 100644 index 0000000000..3989b15cd6 --- /dev/null +++ b/docs/snippets/idempotency/makeHandlerIdempotent.ts @@ -0,0 +1,40 @@ +import { randomUUID } from 'node:crypto'; +import { makeHandlerIdempotent } from '@aws-lambda-powertools/idempotency/middleware'; +import { DynamoDBPersistenceLayer } from '@aws-lambda-powertools/idempotency/dynamodb'; +import middy from '@middy/core'; +import type { Context } from 'aws-lambda'; +import type { Request, Response, SubscriptionResult } from './types'; + +const persistenceStore = new DynamoDBPersistenceLayer({ + tableName: 'idempotencyTableName', +}); + +const createSubscriptionPayment = async ( + event: Request +): Promise => { + // ... 
create payment + return { + id: randomUUID(), + productId: event.productId, + }; +}; + +export const handler = middy( + async (event: Request, _context: Context): Promise => { + try { + const payment = await createSubscriptionPayment(event); + + return { + paymentId: payment.id, + message: 'success', + statusCode: 200, + }; + } catch (error) { + throw new Error('Error creating payment'); + } + } +).use( + makeHandlerIdempotent({ + persistenceStore, + }) +); diff --git a/docs/snippets/idempotency/makeIdempotentAnyFunction.ts b/docs/snippets/idempotency/makeIdempotentAnyFunction.ts new file mode 100644 index 0000000000..ead0ca408e --- /dev/null +++ b/docs/snippets/idempotency/makeIdempotentAnyFunction.ts @@ -0,0 +1,59 @@ +import { randomUUID } from 'node:crypto'; +import { + makeIdempotent, + IdempotencyConfig, +} from '@aws-lambda-powertools/idempotency'; +import { DynamoDBPersistenceLayer } from '@aws-lambda-powertools/idempotency/dynamodb'; +import type { Context } from 'aws-lambda'; +import type { Request, Response, SubscriptionResult } from './types'; + +const persistenceStore = new DynamoDBPersistenceLayer({ + tableName: 'idempotencyTableName', +}); +const config = new IdempotencyConfig({}); + +const reportSubscriptionMetrics = async ( + _transactionId: string, + _user: string +): Promise => { + // ... send notification +}; + +const createSubscriptionPayment = makeIdempotent( + async ( + transactionId: string, + event: Request + ): Promise => { + // ... 
create payment + return { + id: transactionId, + productId: event.productId, + }; + }, + { + persistenceStore, + dataIndexArgument: 1, + config, + } +); + +export const handler = async ( + event: Request, + context: Context +): Promise => { + config.registerLambdaContext(context); + try { + const transactionId = randomUUID(); + const payment = await createSubscriptionPayment(transactionId, event); + + await reportSubscriptionMetrics(transactionId, event.user); + + return { + paymentId: payment.id, + message: 'success', + statusCode: 200, + }; + } catch (error) { + throw new Error('Error creating payment'); + } +}; diff --git a/docs/snippets/idempotency/makeIdempotentBase.ts b/docs/snippets/idempotency/makeIdempotentBase.ts new file mode 100644 index 0000000000..857fa8dfab --- /dev/null +++ b/docs/snippets/idempotency/makeIdempotentBase.ts @@ -0,0 +1,38 @@ +import { randomUUID } from 'node:crypto'; +import { makeIdempotent } from '@aws-lambda-powertools/idempotency'; +import { DynamoDBPersistenceLayer } from '@aws-lambda-powertools/idempotency/dynamodb'; +import type { Context } from 'aws-lambda'; +import type { Request, Response, SubscriptionResult } from './types'; + +const persistenceStore = new DynamoDBPersistenceLayer({ + tableName: 'idempotencyTableName', +}); + +const createSubscriptionPayment = async ( + event: Request +): Promise => { + // ... 
create payment + return { + id: randomUUID(), + productId: event.productId, + }; +}; + +export const handler = makeIdempotent( + async (event: Request, _context: Context): Promise => { + try { + const payment = await createSubscriptionPayment(event); + + return { + paymentId: payment.id, + message: 'success', + statusCode: 200, + }; + } catch (error) { + throw new Error('Error creating payment'); + } + }, + { + persistenceStore, + } +); diff --git a/docs/snippets/idempotency/makeIdempotentJmes.ts b/docs/snippets/idempotency/makeIdempotentJmes.ts new file mode 100644 index 0000000000..b460f6df05 --- /dev/null +++ b/docs/snippets/idempotency/makeIdempotentJmes.ts @@ -0,0 +1,51 @@ +import { randomUUID } from 'node:crypto'; +import { + makeIdempotent, + IdempotencyConfig, +} from '@aws-lambda-powertools/idempotency'; +import { DynamoDBPersistenceLayer } from '@aws-lambda-powertools/idempotency/dynamodb'; +import type { Context } from 'aws-lambda'; +import type { Request, Response, SubscriptionResult } from './types'; + +const persistenceStore = new DynamoDBPersistenceLayer({ + tableName: 'idempotencyTableName', +}); + +const createSubscriptionPayment = async ( + user: string, + productId: string +): Promise => { + // ... 
create payment + return { + id: randomUUID(), + productId: productId, + }; +}; + +// Extract the idempotency key from the request headers +const config = new IdempotencyConfig({ + eventKeyJmesPath: 'headers."X-Idempotency-Key"', +}); + +export const handler = makeIdempotent( + async (event: Request, _context: Context): Promise => { + try { + const payment = await createSubscriptionPayment( + event.user, + event.productId + ); + + return { + paymentId: payment.id, + message: 'success', + statusCode: 200, + }; + } catch (error) { + throw new Error('Error creating payment'); + } + }, + { + persistenceStore, + config, + } +); diff --git a/docs/snippets/idempotency/makeIdempotentLambdaContext.ts b/docs/snippets/idempotency/makeIdempotentLambdaContext.ts new file mode 100644 index 0000000000..d90ee17eb1 --- /dev/null +++ b/docs/snippets/idempotency/makeIdempotentLambdaContext.ts @@ -0,0 +1,51 @@ +import { randomUUID } from 'node:crypto'; +import { + makeIdempotent, + IdempotencyConfig, +} from '@aws-lambda-powertools/idempotency'; +import { DynamoDBPersistenceLayer } from '@aws-lambda-powertools/idempotency/dynamodb'; +import type { Context } from 'aws-lambda'; +import type { Request, Response, SubscriptionResult } from './types'; + +const persistenceStore = new DynamoDBPersistenceLayer({ + tableName: 'idempotencyTableName', +}); +const config = new IdempotencyConfig({}); + +const createSubscriptionPayment = makeIdempotent( + async ( + transactionId: string, + event: Request + ): Promise => { + // ... 
create payment + return { + id: transactionId, + productId: event.productId, + }; + }, + { + persistenceStore, + dataIndexArgument: 1, + config, + } +); + +export const handler = async ( + event: Request, + context: Context +): Promise => { + // Register the Lambda context to the IdempotencyConfig instance + config.registerLambdaContext(context); + try { + const transactionId = randomUUID(); + const payment = await createSubscriptionPayment(transactionId, event); + + return { + paymentId: payment.id, + message: 'success', + statusCode: 200, + }; + } catch (error) { + throw new Error('Error creating payment'); + } +}; diff --git a/docs/snippets/idempotency/requiredIdempotencyKey.ts b/docs/snippets/idempotency/requiredIdempotencyKey.ts new file mode 100644 index 0000000000..1f6a13e286 --- /dev/null +++ b/docs/snippets/idempotency/requiredIdempotencyKey.ts @@ -0,0 +1,20 @@ +import { + IdempotencyConfig, + makeIdempotent, +} from '@aws-lambda-powertools/idempotency'; +import { DynamoDBPersistenceLayer } from '@aws-lambda-powertools/idempotency/dynamodb'; + +const persistenceStore = new DynamoDBPersistenceLayer({ + tableName: 'IdempotencyTable', +}); + +// Requires "user"."uid" and "orderId" to be present +const config = new IdempotencyConfig({ + eventKeyJmesPath: '[user.uid, orderId]', + throwOnNoIdempotencyKey: true, +}); + +export const handler = makeIdempotent((_event: unknown) => ({}), { + persistenceStore, + config, +}); diff --git a/docs/snippets/idempotency/types.ts b/docs/snippets/idempotency/types.ts new file mode 100644 index 0000000000..42d2cd63bd --- /dev/null +++ b/docs/snippets/idempotency/types.ts @@ -0,0 +1,15 @@ +type Request = { + user: string; + productId: string; +}; + +type Response = { + [key: string]: unknown; +}; + +type SubscriptionResult = { + id: string; + productId: string; +}; + +export { Request, Response, SubscriptionResult }; diff --git a/docs/snippets/idempotency/workingWithCompositeKey.ts 
b/docs/snippets/idempotency/workingWithCompositeKey.ts new file mode 100644 index 0000000000..8b13d122c7 --- /dev/null +++ b/docs/snippets/idempotency/workingWithCompositeKey.ts @@ -0,0 +1,30 @@ +import { makeHandlerIdempotent } from '@aws-lambda-powertools/idempotency/middleware'; +import { DynamoDBPersistenceLayer } from '@aws-lambda-powertools/idempotency/dynamodb'; +import middy from '@middy/core'; +import type { Context } from 'aws-lambda'; +import type { Request, Response } from './types'; + +const persistenceStore = new DynamoDBPersistenceLayer({ + tableName: 'idempotencyTableName', + sortKeyAttr: 'sort_key', +}); + +export const handler = middy( + async (_event: Request, _context: Context): Promise => { + try { + // ... create payment + + return { + paymentId: '12345', + message: 'success', + statusCode: 200, + }; + } catch (error) { + throw new Error('Error creating payment'); + } + } +).use( + makeHandlerIdempotent({ + persistenceStore, + }) +); diff --git a/docs/snippets/idempotency/workingWithCustomClient.ts b/docs/snippets/idempotency/workingWithCustomClient.ts new file mode 100644 index 0000000000..1577912f10 --- /dev/null +++ b/docs/snippets/idempotency/workingWithCustomClient.ts @@ -0,0 +1,32 @@ +import { makeIdempotent } from '@aws-lambda-powertools/idempotency'; +import { DynamoDBPersistenceLayer } from '@aws-lambda-powertools/idempotency/dynamodb'; +import { DynamoDBClient } from '@aws-sdk/client-dynamodb'; +import type { Context } from 'aws-lambda'; +import type { Request, Response } from './types'; + +const customDynamoDBClient = new DynamoDBClient({ + endpoint: 'http://localhost:8000', +}); +const persistenceStore = new DynamoDBPersistenceLayer({ + tableName: 'idempotencyTableName', + awsSdkV3Client: customDynamoDBClient, +}); + +export const handler = makeIdempotent( + async (_event: Request, _context: Context): Promise => { + try { + // ... 
create payment + + return { + paymentId: '12345', + message: 'success', + statusCode: 200, + }; + } catch (error) { + throw new Error('Error creating payment'); + } + }, + { + persistenceStore, + } +); diff --git a/docs/snippets/idempotency/workingWithCustomConfig.ts b/docs/snippets/idempotency/workingWithCustomConfig.ts new file mode 100644 index 0000000000..6507a1a32c --- /dev/null +++ b/docs/snippets/idempotency/workingWithCustomConfig.ts @@ -0,0 +1,30 @@ +import { makeIdempotent } from '@aws-lambda-powertools/idempotency'; +import { DynamoDBPersistenceLayer } from '@aws-lambda-powertools/idempotency/dynamodb'; +import type { Context } from 'aws-lambda'; +import type { Request, Response } from './types'; + +const persistenceStore = new DynamoDBPersistenceLayer({ + tableName: 'idempotencyTableName', + clientConfig: { + region: 'us-east-1', + }, +}); + +export const handler = makeIdempotent( + async (_event: Request, _context: Context): Promise => { + try { + // ... create payment + + return { + paymentId: '12345', + message: 'success', + statusCode: 200, + }; + } catch (error) { + throw new Error('Error creating payment'); + } + }, + { + persistenceStore, + } +); diff --git a/docs/snippets/idempotency/workingWithExceptions.ts b/docs/snippets/idempotency/workingWithExceptions.ts new file mode 100644 index 0000000000..60d957d893 --- /dev/null +++ b/docs/snippets/idempotency/workingWithExceptions.ts @@ -0,0 +1,59 @@ +import { randomUUID } from 'node:crypto'; +import { + makeIdempotent, + IdempotencyConfig, +} from '@aws-lambda-powertools/idempotency'; +import { DynamoDBPersistenceLayer } from '@aws-lambda-powertools/idempotency/dynamodb'; +import type { Context } from 'aws-lambda'; +import type { Request, Response, SubscriptionResult } from './types'; + +const persistenceStore = new DynamoDBPersistenceLayer({ + tableName: 'idempotencyTableName', +}); +const config = new IdempotencyConfig({}); + +const createSubscriptionPayment = makeIdempotent( + async ( + 
transactionId: string, + event: Request + ): Promise => { + // ... create payment + return { + id: transactionId, + productId: event.productId, + }; + }, + { + persistenceStore, + dataIndexArgument: 1, + config, + } +); + +export const handler = async ( + event: Request, + context: Context +): Promise => { + config.registerLambdaContext(context); + /** + * If an exception is thrown before the wrapped function is called, + * no idempotency record is created. + */ + try { + const transactionId = randomUUID(); + const payment = await createSubscriptionPayment(transactionId, event); + + /** + * If an exception is thrown after the wrapped function is called, + * the idempotency record won't be affected so it's safe to retry. + */ + + return { + paymentId: payment.id, + message: 'success', + statusCode: 200, + }; + } catch (error) { + throw new Error('Error creating payment'); + } +}; diff --git a/docs/snippets/idempotency/workingWithIdempotencyRequiredKey.ts b/docs/snippets/idempotency/workingWithIdempotencyRequiredKey.ts new file mode 100644 index 0000000000..9642e6a630 --- /dev/null +++ b/docs/snippets/idempotency/workingWithIdempotencyRequiredKey.ts @@ -0,0 +1,36 @@ +import { + makeIdempotent, + IdempotencyConfig, +} from '@aws-lambda-powertools/idempotency'; +import { DynamoDBPersistenceLayer } from '@aws-lambda-powertools/idempotency/dynamodb'; +import type { Context } from 'aws-lambda'; +import type { Request, Response } from './types'; + +const persistenceStore = new DynamoDBPersistenceLayer({ + tableName: 'idempotencyTableName', +}); + +const config = new IdempotencyConfig({ + throwOnNoIdempotencyKey: true, + eventKeyJmesPath: '["user.uid", "productId"]', +}); + +export const handler = makeIdempotent( + async (_event: Request, _context: Context): Promise => { + try { + // ... 
create payment + + return { + paymentId: '12345', + message: 'success', + statusCode: 200, + }; + } catch (error) { + throw new Error('Error creating payment'); + } + }, + { + persistenceStore, + config, + } +); diff --git a/docs/snippets/idempotency/workingWithLocalCache.ts b/docs/snippets/idempotency/workingWithLocalCache.ts new file mode 100644 index 0000000000..8570f3f055 --- /dev/null +++ b/docs/snippets/idempotency/workingWithLocalCache.ts @@ -0,0 +1,35 @@ +import { IdempotencyConfig } from '@aws-lambda-powertools/idempotency'; +import { makeHandlerIdempotent } from '@aws-lambda-powertools/idempotency/middleware'; +import { DynamoDBPersistenceLayer } from '@aws-lambda-powertools/idempotency/dynamodb'; +import middy from '@middy/core'; +import type { Context } from 'aws-lambda'; +import type { Request, Response } from './types'; + +const persistenceStore = new DynamoDBPersistenceLayer({ + tableName: 'idempotencyTableName', +}); +const config = new IdempotencyConfig({ + useLocalCache: true, + maxLocalCacheSize: 512, +}); + +export const handler = middy( + async (_event: Request, _context: Context): Promise => { + try { + // ... 
create payment + + return { + paymentId: '1234567890', + message: 'success', + statusCode: 200, + }; + } catch (error) { + throw new Error('Error creating payment'); + } + } +).use( + makeHandlerIdempotent({ + persistenceStore, + config, + }) +); diff --git a/docs/snippets/idempotency/workingWithPayloadValidation.ts b/docs/snippets/idempotency/workingWithPayloadValidation.ts new file mode 100644 index 0000000000..eb582656a0 --- /dev/null +++ b/docs/snippets/idempotency/workingWithPayloadValidation.ts @@ -0,0 +1,58 @@ +import { randomUUID } from 'node:crypto'; +import { + makeIdempotent, + IdempotencyConfig, +} from '@aws-lambda-powertools/idempotency'; +import { DynamoDBPersistenceLayer } from '@aws-lambda-powertools/idempotency/dynamodb'; +import type { Context } from 'aws-lambda'; +import type { Request, Response, SubscriptionResult } from './types'; + +const persistenceStore = new DynamoDBPersistenceLayer({ + tableName: 'idempotencyTableName', +}); +const config = new IdempotencyConfig({ + eventKeyJmesPath: '["userId", "productId"]', + payloadValidationJmesPath: 'amount', +}); + +const fetchProductAmount = async (_transactionId: string): Promise => { + // ... fetch product amount + return 42; +}; + +const createSubscriptionPayment = makeIdempotent( + async (event: Request & { amount: number }): Promise => { + // ... 
create payment + return { + id: randomUUID(), + productId: event.productId, + }; + }, + { + persistenceStore, + dataIndexArgument: 1, + config, + } +); + +export const handler = async ( + event: Request, + context: Context +): Promise => { + config.registerLambdaContext(context); + try { + const productAmount = await fetchProductAmount(event.productId); + const payment = await createSubscriptionPayment({ + ...event, + amount: productAmount, + }); + + return { + paymentId: payment.id, + message: 'success', + statusCode: 200, + }; + } catch (error) { + throw new Error('Error creating payment'); + } +}; diff --git a/docs/snippets/idempotency/workingWithRecordExpiration.ts b/docs/snippets/idempotency/workingWithRecordExpiration.ts new file mode 100644 index 0000000000..39fa7594eb --- /dev/null +++ b/docs/snippets/idempotency/workingWithRecordExpiration.ts @@ -0,0 +1,35 @@ +import { + makeIdempotent, + IdempotencyConfig, +} from '@aws-lambda-powertools/idempotency'; +import { DynamoDBPersistenceLayer } from '@aws-lambda-powertools/idempotency/dynamodb'; +import type { Context } from 'aws-lambda'; +import type { Request, Response } from './types'; + +const persistenceStore = new DynamoDBPersistenceLayer({ + tableName: 'idempotencyTableName', +}); + +const config = new IdempotencyConfig({ + expiresAfterSeconds: 300, +}); + +export const handler = makeIdempotent( + async (_event: Request, _context: Context): Promise => { + try { + // ... 
create payment + + return { + paymentId: '12345', + message: 'success', + statusCode: 200, + }; + } catch (error) { + throw new Error('Error creating payment'); + } + }, + { + persistenceStore, + config, + } +); diff --git a/docs/snippets/package.json b/docs/snippets/package.json index 0ce221ad60..5e067b21e3 100644 --- a/docs/snippets/package.json +++ b/docs/snippets/package.json @@ -38,4 +38,4 @@ "axios": "^1.2.4", "hashi-vault-js": "^0.4.13" } -} +} \ No newline at end of file diff --git a/docs/snippets/tsconfig.json b/docs/snippets/tsconfig.json index 6f72111cb7..59dfbd1435 100644 --- a/docs/snippets/tsconfig.json +++ b/docs/snippets/tsconfig.json @@ -29,6 +29,18 @@ "@aws-lambda-powertools/parameters/dynamodb": [ "../../packages/parameters/lib/dynamodb" ], + "@aws-lambda-powertools/idempotency/dynamodb": [ + "../../packages/idempotency/lib/persistence/DynamoDBPersistenceLayer" + ], + "@aws-lambda-powertools/idempotency/persistence": [ + "../../packages/idempotency/lib/persistence" + ], + "@aws-lambda-powertools/idempotency": [ + "../../packages/idempotency/lib" + ], + "@aws-lambda-powertools/idempotency/middleware": [ + "../../packages/idempotency/lib/middleware" + ] }, }, "exclude": [ diff --git a/docs/utilities/idempotency.md b/docs/utilities/idempotency.md new file mode 100644 index 0000000000..9341869b68 --- /dev/null +++ b/docs/utilities/idempotency.md @@ -0,0 +1,797 @@ +--- +title: Idempotency +description: Utility +--- + +???+ warning + **This utility is currently released as beta developer preview** and is intended strictly for feedback and testing purposes **and not for production workloads**. The version and all future versions tagged with the `-beta` suffix should be treated as not stable. Up until before the [General Availability release](https://github.com/aws-powertools/powertools-lambda-typescript/milestone/7) we might introduce significant breaking changes and improvements in response to customers feedback. 
+ +The idempotency utility provides a simple solution to convert your Lambda functions into idempotent operations which are safe to retry. + +## Key features + +* Prevent Lambda handler from executing more than once on the same event payload during a time window +* Ensure Lambda handler returns the same result when called with the same payload +* Select a subset of the event as the idempotency key using JMESPath expressions +* Set a time window in which records with the same payload should be considered duplicates +* Expire in-progress executions if the Lambda function times out halfway through + +## Terminology + +The property of idempotency means that an operation does not cause additional side effects if it is called more than once with the same input parameters. + +**Idempotent operations will return the same result when they are called multiple times with the same parameters**. This makes idempotent operations safe to retry. + +**Idempotency key** is a hash representation of either the entire event or a specific configured subset of the event, and invocation results are **JSON serialized** and stored in your persistence storage layer. + +**Idempotency record** is the data representation of an idempotent request saved in your preferred storage layer. We use it to coordinate whether a request is idempotent, whether it's still valid or expired based on timestamps, etc. + +<center>
+```mermaid +classDiagram + direction LR + class IdempotencyRecord { + idempotencyKey string + status Status + expiryTimestamp number + inProgressExpiryTimestamp number + responseData Json~string~ + payloadHash string + } + class Status { + <> + INPROGRESS + COMPLETE + EXPIRED internal_only + } + IdempotencyRecord -- Status +``` + +Idempotency record representation +
+ +## Getting started + +### IAM Permissions + +Your Lambda function IAM Role must have `dynamodb:GetItem`, `dynamodb:PutItem`, `dynamodb:UpdateItem` and `dynamodb:DeleteItem` IAM permissions before using this feature. + +???+ note + If you're using one of our examples: [AWS Serverless Application Model (SAM)](#required-resources) or [Terraform](#required-resources) the required permissions are already included. + +### Required resources + +Before getting started, you need to create a persistent storage layer where the idempotency utility can store its state - your lambda functions will need read and write access to it. + +As of now, Amazon DynamoDB is the only supported persistent storage layer, so you'll need to create a table first. + +**Default table configuration** + +If you're not [changing the default configuration for the DynamoDB persistence layer](#dynamodbpersistencelayer), this is the expected default configuration: + +| Configuration | Value | Notes | +| ------------------ | ------------ | ----------------------------------------------------------------------------------- | +| Partition key | `id` | +| TTL attribute name | `expiration` | This can only be configured after your table is created if you're using AWS Console | + +???+ tip "Tip: You can share a single state table for all functions" + You can reuse the same DynamoDB table to store idempotency state. We add the Lambda function name in addition to the idempotency key as a hash key. 
+ + +=== "AWS Serverless Application Model (SAM) example" + + ```yaml hl_lines="6-14 24-31" + Transform: AWS::Serverless-2016-10-31 + Resources: + IdempotencyTable: + Type: AWS::DynamoDB::Table + Properties: + AttributeDefinitions: + - AttributeName: id + AttributeType: S + KeySchema: + - AttributeName: id + KeyType: HASH + TimeToLiveSpecification: + AttributeName: expiration + Enabled: true + BillingMode: PAY_PER_REQUEST + + HelloWorldFunction: + Type: AWS::Serverless::Function + Properties: + Runtime: nodejs18.x + Handler: index.handler + Policies: + - Statement: + - Sid: AllowDynamodbReadWrite + Effect: Allow + Action: + - dynamodb:PutItem + - dynamodb:GetItem + - dynamodb:UpdateItem + - dynamodb:DeleteItem + Resource: !GetAtt IdempotencyTable.Arn + ``` + +=== "Terraform" + + ```terraform hl_lines="14-26 64-70" + terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "~> 4.0" + } + } + } + + provider "aws" { + region = "us-east-1" # Replace with your desired AWS region + } + + resource "aws_dynamodb_table" "IdempotencyTable" { + name = "IdempotencyTable" + billing_mode = "PAY_PER_REQUEST" + hash_key = "id" + attribute { + name = "id" + type = "S" + } + ttl { + attribute_name = "expiration" + enabled = true + } + } + + resource "aws_lambda_function" "IdempotencyFunction" { + function_name = "IdempotencyFunction" + role = aws_iam_role.IdempotencyFunctionRole.arn + runtime = "nodejs18.x" + handler = "index.handler" + filename = "lambda.zip" + } + + resource "aws_iam_role" "IdempotencyFunctionRole" { + name = "IdempotencyFunctionRole" + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Sid = "" + Effect = "Allow" + Principal = { + Service = "lambda.amazonaws.com" + } + Action = "sts:AssumeRole" + }, + ] + }) + } + + resource "aws_iam_policy" "LambdaDynamoDBPolicy" { + name = "LambdaDynamoDBPolicy" + description = "IAM policy for Lambda function to access DynamoDB" + policy = jsonencode({ + Version = 
"2012-10-17" + Statement = [ + { + Sid = "AllowDynamodbReadWrite" + Effect = "Allow" + Action = [ + "dynamodb:PutItem", + "dynamodb:GetItem", + "dynamodb:UpdateItem", + "dynamodb:DeleteItem", + ] + Resource = aws_dynamodb_table.IdempotencyTable.arn + }, + ] + }) + } + + resource "aws_iam_role_policy_attachment" "IdempotencyFunctionRoleAttachment" { + role = aws_iam_role.IdempotencyFunctionRole.name + policy_arn = aws_iam_policy.LambdaDynamoDBPolicy.arn + } + ``` + +???+ warning "Warning: Large responses with DynamoDB persistence layer" + When using this utility with DynamoDB, your function's responses must be [smaller than 400KB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html#limits-items){target="_blank"}. + + Larger items cannot be written to DynamoDB and will cause exceptions. + +???+ info "Info: DynamoDB" + Each function invocation will generally make 2 requests to DynamoDB. If the + result returned by your Lambda is less than 1kb, you can expect 2 WCUs per invocation. For retried invocations, you will + see 1WCU and 1RCU. Review the [DynamoDB pricing documentation](https://aws.amazon.com/dynamodb/pricing/){target="_blank"} to + estimate the cost. + +### MakeIdempotent function wrapper + +You can quickly start by initializing the `DynamoDBPersistenceLayer` class and using it with the `makeIdempotent` function wrapper on your Lambda handler. + +???+ note + In this example, the entire Lambda handler is treated as a single idempotent operation. If your Lambda handler can cause multiple side effects, or you're only interested in making a specific logic idempotent, use the `makeIdempotent` high-order function only on the function that needs to be idempotent. + +!!! tip "See [Choosing a payload subset for idempotency](#choosing-a-payload-subset-for-idempotency) for more elaborate use cases." 
+ +=== "index.ts" + + ```typescript hl_lines="2-3 21 35-38" + --8<-- "docs/snippets/idempotency/makeIdempotentBase.ts" + ``` + +=== "Types" + + ```typescript + --8<-- "docs/snippets/idempotency/types.ts::13" + ``` + +After processing this request successfully, a second request containing the exact same payload above will now return the same response, ensuring our customer isn't charged twice. + +!!! question "New to idempotency concept? Please review our [Terminology](#terminology) section if you haven't yet." + +You can also use the `makeIdempotent` function wrapper on any function that returns a response to make it idempotent. This is useful when you want to make a specific logic idempotent, for example when your Lambda handler performs multiple side effects and you only want to make a specific one idempotent. + +???+ warning "Limitation" + Make sure to return a JSON serializable response from your function, otherwise you'll get an error. + +When using `makeIdempotent` on arbitrary functions, you can tell us which argument in your function signature has the data we should use via **`dataIndexArgument`**. If you don't specify this argument, we'll use the first argument in the function signature. + +???+ note + The function in the example below has two arguments, note that while wrapping it with the `makeIdempotent` high-order function, we specify the `dataIndexArgument` as `1` to tell the decorator that the second argument is the one that contains the data we should use to make the function idempotent. Remember that arguments are zero-indexed, so the first argument is `0`, the second is `1`, and so on. + +=== "index.ts" + + ```typescript hl_lines="22 34-38" + --8<-- "docs/snippets/idempotency/makeIdempotentAnyFunction.ts" + ``` + +=== "Types" + + ```typescript + --8<-- "docs/snippets/idempotency/types.ts::13" + ``` + +### MakeHandlerIdempotent Middy middleware + +!!! 
tip "A note about Middy" + Currently we support only Middy `v3.x`, which you can install by running `npm i @middy/core@~3`. + Check their docs to learn more about [Middy and its middleware stack](https://middy.js.org/docs/intro/getting-started){target="_blank"} as well as [best practices when working with Powertools](https://middy.js.org/docs/integrations/lambda-powertools#best-practices){target="_blank"}. + +If you are using [Middy](https://middy.js.org){target="_blank"} as your middleware engine, you can use the `makeHandlerIdempotent` middleware to make your Lambda handler idempotent. Similar to the `makeIdempotent` function wrapper, you can quickly make your Lambda handler idempotent by initializing the `DynamoDBPersistenceLayer` class and using it with the `makeHandlerIdempotent` middleware. + +=== "index.ts" + + ```typescript hl_lines="22 36-40" + --8<-- "docs/snippets/idempotency/makeHandlerIdempotent.ts" + ``` + +=== "Types" + + ```typescript + --8<-- "docs/snippets/idempotency/types.ts::13" + ``` + +### Choosing a payload subset for idempotency + +???+ tip "Tip: Dealing with always changing payloads" + When dealing with a more elaborate payload, where parts of the payload always change, you should use the **`eventKeyJmesPath`** parameter. + +Use [`IdempotencyConfig`](#customizing-the-default-behavior) to instruct the idempotent decorator to only use a portion of your payload to verify whether a request is idempotent, and therefore it should not be retried. + +> **Payment scenario** + +In this example, we have a Lambda handler that creates a payment for a user subscribing to a product. We want to ensure that we don't accidentally charge our customer by subscribing them more than once. + +Imagine the function executes successfully, but the client never receives the response due to a connection issue. It is safe to retry in this instance, as the idempotent decorator will return a previously saved response.
+ +**What we want here** is to instruct Idempotency to use the `user` and `productId` fields from our incoming payload as our idempotency key. If we were to treat the entire request as our idempotency key, a simple HTTP header or timestamp change would cause our customer to be charged twice. + +???+ tip "Deserializing JSON strings in payloads for increased accuracy." + The payload extracted by the `eventKeyJmesPath` is treated as a string by default. This means there could be differences in whitespace even when the JSON payload itself is identical. + +=== "index.ts" + + ```typescript hl_lines="4 26-28 49" + --8<-- "docs/snippets/idempotency/makeIdempotentJmes.ts" + ``` + +=== "Example event" + + ```json hl_lines="28" + { + "version":"2.0", + "routeKey":"ANY /createpayment", + "rawPath":"/createpayment", + "rawQueryString":"", + "headers": { + "Header1": "value1", + "X-Idempotency-Key": "abcdefg" + }, + "requestContext":{ + "accountId":"123456789012", + "apiId":"api-id", + "domainName":"id.execute-api.us-east-1.amazonaws.com", + "domainPrefix":"id", + "http":{ + "method":"POST", + "path":"/createpayment", + "protocol":"HTTP/1.1", + "sourceIp":"ip", + "userAgent":"agent" + }, + "requestId":"id", + "routeKey":"ANY /createpayment", + "stage":"$default", + "time":"10/Feb/2021:13:40:43 +0000", + "timeEpoch":1612964443723 + }, + "body":"{\"user\":\"xyz\",\"productId\":\"123456789\"}", + "isBase64Encoded":false + } + ``` + +=== "Types" + + ```typescript + --8<-- "docs/snippets/idempotency/types.ts::13" + ``` + +### Lambda timeouts + +???+ note + This is automatically done when you wrap your Lambda handler with the [makeIdempotent](#makeIdempotent-function-wrapper) function wrapper, or use the [`makeHandlerIdempotent`](#makeHandlerIdempotent-middy-middleware) Middy middleware. 
+ +To protect against extended failed retries when a [Lambda function times out](https://aws.amazon.com/premiumsupport/knowledge-center/lambda-verify-invocation-timeouts/), Powertools for AWS calculates and includes the remaining invocation available time as part of the idempotency record. + +???+ example + If a second invocation happens **after** this timestamp, and the record is marked as `INPROGRESS`, we will execute the invocation again as if it was in the `EXPIRED` state (e.g., `expire_seconds` field elapsed). + + This means that if an invocation expired during execution, it will be quickly executed again on the next retry. + +???+ important + If you are only using the [makeIdempotent function wrapper](#makeidempotent-function-wrapper) to guard isolated parts of your code, you must use `registerLambdaContext` available in the [idempotency config object](#customizing-the-default-behavior) to benefit from this protection. + +Here is an example of how you register the Lambda context in your handler: + +=== "Registering Lambda Context" + + ```typescript hl_lines="13 38" + --8<-- "docs/snippets/idempotency/makeIdempotentLambdaContext.ts" + ``` + +### Handling exceptions + +If you are making your entire Lambda handler idempotent, any unhandled exceptions that are raised during the code execution will cause **the record in the persistence layer to be deleted**. +This means that new invocations will execute your code again despite having the same payload. If you don't want the record to be deleted, you need to catch exceptions within the idempotent function and return a successful response. + +<center>
+```mermaid +sequenceDiagram + participant Client + participant Lambda + participant Persistence Layer + Client->>Lambda: Invoke (event) + Lambda->>Persistence Layer: Get or set (id=event.search(payload)) + activate Persistence Layer + Note right of Persistence Layer: Locked during this time. Prevents multiple
Lambda invocations with the same
payload running concurrently. + Lambda--xLambda: Call handler (event).
Raises exception + Lambda->>Persistence Layer: Delete record (id=event.search(payload)) + deactivate Persistence Layer + Lambda-->>Client: Return error response +``` +Idempotent sequence exception +
+ +If you are using `makeIdempotent` on any other function, any unhandled exceptions that are thrown _inside_ the wrapped function will cause the record in the persistence layer to be deleted, and allow the function to be executed again if retried. + +If an error is thrown _outside_ the scope of the decorated function and after your function has been called, the persistent record will not be affected. In this case, idempotency will be maintained for your decorated function. Example: + +=== "Handling exceptions" + + ```typescript hl_lines="39-40 47-48" + --8<-- "docs/snippets/idempotency/workingWithExceptions.ts" + ``` + +???+ warning + **We will throw `IdempotencyPersistenceLayerError`** if any of the calls to the persistence layer fail unexpectedly. + + As this happens outside the scope of your decorated function, you are not able to catch it when making your Lambda handler idempotent. + +### Idempotency request flow + +The following sequence diagrams explain how the Idempotency feature behaves under different scenarios. + +#### Successful request + +
+```mermaid +sequenceDiagram + participant Client + participant Lambda + participant Persistence Layer + alt initial request + Client->>Lambda: Invoke (event) + Lambda->>Persistence Layer: Get or set idempotency_key=hash(payload) + activate Persistence Layer + Note over Lambda,Persistence Layer: Set record status to INPROGRESS.
Prevents concurrent invocations
with the same payload + Lambda-->>Lambda: Call your function + Lambda->>Persistence Layer: Update record with result + deactivate Persistence Layer + Persistence Layer-->>Persistence Layer: Update record + Note over Lambda,Persistence Layer: Set record status to COMPLETE.
New invocations with the same payload
now return the same result + Lambda-->>Client: Response sent to client + else retried request + Client->>Lambda: Invoke (event) + Lambda->>Persistence Layer: Get or set idempotency_key=hash(payload) + activate Persistence Layer + Persistence Layer-->>Lambda: Already exists in persistence layer. + deactivate Persistence Layer + Note over Lambda,Persistence Layer: Record status is COMPLETE and not expired + Lambda-->>Client: Same response sent to client + end +``` +Idempotent successful request +
+ +#### Successful request with cache enabled + +!!! note "[In-memory cache is disabled by default](#using-in-memory-cache)." + +
+```mermaid +sequenceDiagram + participant Client + participant Lambda + participant Persistence Layer + alt initial request + Client->>Lambda: Invoke (event) + Lambda->>Persistence Layer: Get or set idempotency_key=hash(payload) + activate Persistence Layer + Note over Lambda,Persistence Layer: Set record status to INPROGRESS.
Prevents concurrent invocations
with the same payload + Lambda-->>Lambda: Call your function + Lambda->>Persistence Layer: Update record with result + deactivate Persistence Layer + Persistence Layer-->>Persistence Layer: Update record + Note over Lambda,Persistence Layer: Set record status to COMPLETE.
New invocations with the same payload
now return the same result + Lambda-->>Lambda: Save record and result in memory + Lambda-->>Client: Response sent to client + else retried request + Client->>Lambda: Invoke (event) + Lambda-->>Lambda: Get idempotency_key=hash(payload) + Note over Lambda,Persistence Layer: Record status is COMPLETE and not expired + Lambda-->>Client: Same response sent to client + end +``` +Idempotent successful request cached +
+ +#### Expired idempotency records + +
+```mermaid +sequenceDiagram + participant Client + participant Lambda + participant Persistence Layer + alt initial request + Client->>Lambda: Invoke (event) + Lambda->>Persistence Layer: Get or set idempotency_key=hash(payload) + activate Persistence Layer + Note over Lambda,Persistence Layer: Set record status to INPROGRESS.
Prevents concurrent invocations
with the same payload + Lambda-->>Lambda: Call your function + Lambda->>Persistence Layer: Update record with result + deactivate Persistence Layer + Persistence Layer-->>Persistence Layer: Update record + Note over Lambda,Persistence Layer: Set record status to COMPLETE.
New invocations with the same payload
now return the same result + Lambda-->>Client: Response sent to client + else retried request + Client->>Lambda: Invoke (event) + Lambda->>Persistence Layer: Get or set idempotency_key=hash(payload) + activate Persistence Layer + Persistence Layer-->>Lambda: Already exists in persistence layer. + deactivate Persistence Layer + Note over Lambda,Persistence Layer: Record status is COMPLETE but expired hours ago + loop Repeat initial request process + Note over Lambda,Persistence Layer: 1. Set record to INPROGRESS,
2. Call your function,
3. Set record to COMPLETE + end + Lambda-->>Client: Same response sent to client + end +``` +Previous Idempotent request expired +
+ +#### Concurrent identical in-flight requests + +
+```mermaid +sequenceDiagram + participant Client + participant Lambda + participant Persistence Layer + Client->>Lambda: Invoke (event) + Lambda->>Persistence Layer: Get or set idempotency_key=hash(payload) + activate Persistence Layer + Note over Lambda,Persistence Layer: Set record status to INPROGRESS.
Prevents concurrent invocations
with the same payload + par Second request + Client->>Lambda: Invoke (event) + Lambda->>Persistence Layer: Get or set idempotency_key=hash(payload) + Lambda--xLambda: IdempotencyAlreadyInProgressError + Lambda->>Client: Error sent to client if unhandled + end + Lambda-->>Lambda: Call your function + Lambda->>Persistence Layer: Update record with result + deactivate Persistence Layer + Persistence Layer-->>Persistence Layer: Update record + Note over Lambda,Persistence Layer: Set record status to COMPLETE.
New invocations with the same payload
now return the same result + Lambda-->>Client: Response sent to client +``` +Concurrent identical in-flight requests +
+ +#### Lambda request timeout + +
+```mermaid +sequenceDiagram + participant Client + participant Lambda + participant Persistence Layer + alt initial request + Client->>Lambda: Invoke (event) + Lambda->>Persistence Layer: Get or set idempotency_key=hash(payload) + activate Persistence Layer + Note over Lambda,Persistence Layer: Set record status to INPROGRESS.
Prevents concurrent invocations
with the same payload + Lambda-->>Lambda: Call your function + Note right of Lambda: Time out + Lambda--xLambda: Time out error + Lambda-->>Client: Return error response + deactivate Persistence Layer + else retry after Lambda timeout elapses + Client->>Lambda: Invoke (event) + Lambda->>Persistence Layer: Get or set idempotency_key=hash(payload) + activate Persistence Layer + Note over Lambda,Persistence Layer: Set record status to INPROGRESS.
Reset in_progress_expiry attribute + Lambda-->>Lambda: Call your function + Lambda->>Persistence Layer: Update record with result + deactivate Persistence Layer + Persistence Layer-->>Persistence Layer: Update record + Lambda-->>Client: Response sent to client + end +``` +Idempotent request during and after Lambda timeouts +
+ +#### Optional idempotency key + +
+```mermaid +sequenceDiagram + participant Client + participant Lambda + participant Persistence Layer + alt request with idempotency key + Client->>Lambda: Invoke (event) + Lambda->>Persistence Layer: Get or set idempotency_key=hash(payload) + activate Persistence Layer + Note over Lambda,Persistence Layer: Set record status to INPROGRESS.
Prevents concurrent invocations
with the same payload + Lambda-->>Lambda: Call your function + Lambda->>Persistence Layer: Update record with result + deactivate Persistence Layer + Persistence Layer-->>Persistence Layer: Update record + Note over Lambda,Persistence Layer: Set record status to COMPLETE.
New invocations with the same payload
now return the same result + Lambda-->>Client: Response sent to client + else request(s) without idempotency key + Client->>Lambda: Invoke (event) + Note over Lambda: Idempotency key is missing + Note over Persistence Layer: Skips any operation to fetch, update, and delete + Lambda-->>Lambda: Call your function + Lambda-->>Client: Response sent to client + end +``` +Optional idempotency key +
+ +## Advanced + +### Persistence layers + +#### DynamoDBPersistenceLayer + +This persistence layer is built-in, and you can either use an existing DynamoDB table or create a new one dedicated for idempotency state (recommended). + +=== "Customizing DynamoDBPersistenceLayer to suit your table structure" + + ```typescript hl_lines="7-15" + --8<-- "docs/snippets/idempotency/customizePersistenceLayer.ts" + ``` + +When using DynamoDB as a persistence layer, you can alter the attribute names by passing these parameters when initializing the persistence layer: + +| Parameter | Required | Default | Description | +| ------------------------ | ------------------ | ------------------------------------ | -------------------------------------------------------------------------------------------------------- | +| **tableName** | :heavy_check_mark: | | Table name to store state | +| **keyAttr** | | `id` | Partition key of the table. Hashed representation of the payload (unless **sort_key_attr** is specified) | +| **expiryAttr** | | `expiration` | Unix timestamp of when record expires | +| **inProgressExpiryAttr** | | `in_progress_expiration` | Unix timestamp of when record expires while in progress (in case of the invocation times out) | +| **statusAttr** | | `status` | Stores status of the lambda execution during and after invocation | +| **dataAttr** | | `data` | Stores results of successfully executed Lambda handlers | +| **validationKeyAttr** | | `validation` | Hashed representation of the parts of the event used for validation | +| **sortKeyAttr** | | | Sort key of the table (if table is configured with a sort key). | +| **staticPkValue** | | `idempotency#{LAMBDA_FUNCTION_NAME}` | Static value to use as the partition key. Only used when **sort_key_attr** is set. | + +### Customizing the default behavior + +Idempotent decorator can be further configured with **`IdempotencyConfig`** as seen in the previous examples. 
These are the available options for further configuration + +| Parameter | Default | Description | +| ----------------------------- | ------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| **eventKeyJmesPath** | `''` | JMESPath expression to extract the idempotency key from the event | +| **payloadValidationJmesPath** | `''` | JMESPath expression to validate whether certain parameters have changed in the event between invocations with the same idempotency key | +| **throwOnNoIdempotencyKey** | `false` | Throw an error if no idempotency key was found in the request | +| **expiresAfterSeconds** | 3600 | The number of seconds to wait before a record is expired | +| **useLocalCache** | `false` | Whether to locally cache idempotency results | +| **maxLocalCacheSize** | 256 | Max number of items to store in local cache | +| **hashFunction** | `md5` | Function to use for calculating hashes, as provided by the [crypto](https://nodejs.org/api/crypto.html#cryptocreatehashalgorithm-options){target="_blank"} module in the standard library. | + +### Handling concurrent executions with the same payload + +This utility will throw an **`IdempotencyAlreadyInProgressError`** error if you receive **multiple invocations with the same payload while the first invocation hasn't completed yet**. + +???+ info + If you receive `IdempotencyAlreadyInProgressError`, you can safely retry the operation. + +This is a locking mechanism for correctness. Since we don't know the result from the first invocation yet, we can't safely allow another concurrent execution. + +### Using in-memory cache + +**By default, in-memory local caching is disabled**, since we don't know how much memory you consume per invocation compared to the maximum configured in your Lambda function. 
+ +???+ note "Note: This in-memory cache is local to each Lambda execution environment" + This means it will be effective in cases where your function's concurrency is low in comparison to the number of "retry" invocations with the same payload, because cache might be empty. + +You can enable in-memory caching with the **`useLocalCache`** parameter: + +=== "Caching idempotent transactions in-memory to prevent multiple calls to storage" + + ```typescript hl_lines="12-13" + --8<-- "docs/snippets/idempotency/workingWithLocalCache.ts" + ``` + +When enabled, the default is to cache a maximum of 256 records in each Lambda execution environment - You can change it with the **`maxLocalCacheSize`** parameter. + +### Expiring idempotency records + +!!! note "By default, we expire idempotency records after **an hour** (3600 seconds)." + +In most cases, it is not desirable to store the idempotency records forever. Rather, you want to guarantee that the same payload won't be executed within a period of time. + +You can change this window with the **`expiresAfterSeconds`** parameter: + +=== "Adjusting idempotency record expiration" + + ```typescript hl_lines="14" + --8<-- "docs/snippets/idempotency/workingWithRecordExpiration.ts" + ``` + +This will mark any records older than 5 minutes as expired, and [your function will be executed as normal if it is invoked with a matching payload](#expired-idempotency-records). + +???+ important "Idempotency record expiration vs DynamoDB time-to-live (TTL)" + [DynamoDB TTL is a feature](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/howitworks-ttl.html){target="_blank"} to remove items after a certain period of time, it may occur within 48 hours of expiration. + + We don't rely on DynamoDB or any persistence storage layer to determine whether a record is expired to avoid eventual inconsistency states. 
+ + Instead, Idempotency records saved in the storage layer contain timestamps that can be verified upon retrieval and double checked within the Idempotency feature. + + **Why?** + + A record might still be valid (`COMPLETE`) when we retrieve it, but in some rare cases it might expire a second later. A record could also be [cached in memory](#using-in-memory-cache). You might also want to have idempotent transactions that should expire in seconds. + +### Payload validation + +???+ question "Question: What if your function is invoked with the same payload except some outer parameters have changed?" + Example: A payment transaction for a given productID was requested twice for the same customer, **however the amount to be paid has changed in the second transaction**. + +By default, we will return the same result as it returned before, however in this instance it may be misleading; we provide a fail fast payload validation to address this edge case. + +With **`payloadValidationJmesPath`**, you can provide an additional JMESPath expression to specify which part of the event body should be validated against previous idempotent invocations. + +=== "Payload validation" + + ```typescript hl_lines="14-15" + --8<-- "docs/snippets/idempotency/workingWithPayloadValidation.ts" + ``` + +In this example, the **`userId`** and **`productId`** keys are used as the payload to generate the idempotency key, as per the **`eventKeyJmesPath`** parameter. + +???+ note + If we try to send the same request but with a different amount, we will raise **`IdempotencyValidationError`**. + +Without payload validation, we would have returned the same result as we did for the initial request. Since we're also returning an amount in the response, this could be quite confusing for the client. + +By using **`payloadValidationJmesPath="amount"`**, we prevent this potentially confusing behavior and instead throw an error. 
+ +### Making idempotency key required + +If you want to enforce that an idempotency key is required, you can set **`throwOnNoIdempotencyKey`** to `true`. + +This means that we will raise **`IdempotencyKeyError`** if the evaluation of **`eventKeyJmesPath`** results in an empty subset. + +???+ warning + To prevent errors, transactions will not be treated as idempotent if **`throwOnNoIdempotencyKey`** is set to `false` and the evaluation of **`eventKeyJmesPath`** is an empty result. Therefore, no data will be fetched, stored, or deleted in the idempotency storage layer. + +=== "Idempotency key required" + + ```typescript hl_lines="14-15" + --8<-- "docs/snippets/idempotency/workingWithIdempotencyRequiredKey.ts" + ``` + +=== "Success Event" + + ```json hl_lines="3 6" + { + "user": { + "uid": "BB0D045C-8878-40C8-889E-38B3CB0A61B1", + "name": "Foo" + }, + "productId": 10000 + } + ``` + +=== "Failure Event" + + ```json hl_lines="3 5" + { + "user": { + "uid": "BB0D045C-8878-40C8-889E-38B3CB0A61B1", + "name": "foo", + "productId": 10000 + } + } + ``` + +### Customizing boto configuration + +The **`clientConfig`** and **`awsSdkV3Client`** parameters enable you to pass in custom configurations or your own [DynamoDBClient](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-dynamodb/classes/dynamodbclient.html){target="_blank"} when constructing the persistence store. + +=== "Passing specific configuration" + + ```typescript hl_lines="8-10" + --8<-- "docs/snippets/idempotency/workingWithCustomConfig.ts" + ``` + +=== "Passing custom DynamoDBClient" + + ```typescript hl_lines="7-9 12" + --8<-- "docs/snippets/idempotency/workingWithCustomClient.ts" + ``` + +### Using a DynamoDB table with a composite primary key + +When using a composite primary key table (hash+range key), use `sortKeyAttr` parameter when initializing your persistence layer. + +With this setting, we will save the idempotency key in the sort key instead of the primary key. 
By default, the primary key will now be set to `idempotency#{LAMBDA_FUNCTION_NAME}`. + +You can optionally set a static value for the partition key using the `staticPkValue` parameter. + +=== "Reusing a DynamoDB table that uses a composite primary key" + + ```typescript hl_lines="9" + --8<-- "docs/snippets/idempotency/workingWithCompositeKey.ts" + ``` + +The example function above would cause data to be stored in DynamoDB like this: + +| id | sort_key | expiration | status | data | +| ---------------------------- | -------------------------------- | ---------- | ----------- | ---------------------------------------------------------------- | +| idempotency#MyLambdaFunction | 1e956ef7da78d0cb890be999aecc0c9e | 1636549553 | COMPLETED | {"paymentId": "12345", "message": "success", "statusCode": 200} | +| idempotency#MyLambdaFunction | 2b2cdb5f86361e97b4383087c1ffdf27 | 1636549571 | COMPLETED | {"paymentId": "527212", "message": "success", "statusCode": 200} | +| idempotency#MyLambdaFunction | f091d2527ad1c78f05d54cc3f363be80 | 1636549585 | IN_PROGRESS | | + +## Extra resources + +If you're interested in a deep dive on how Amazon uses idempotency when building our APIs, check out +[this article](https://aws.amazon.com/builders-library/making-retries-safe-with-idempotent-APIs/){target="_blank"}. 
\ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index a0d03992e6..f5b7408590 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -16,6 +16,7 @@ nav: - core/metrics.md - Utilities: - utilities/parameters.md + - utilities/idempotency.md theme: name: material diff --git a/packages/idempotency/README.md b/packages/idempotency/README.md index 15efa33f7d..534d532cb9 100644 --- a/packages/idempotency/README.md +++ b/packages/idempotency/README.md @@ -193,7 +193,7 @@ const config = new IdempotencyConfig({ }); export const handler = middy( - async (event: APIGatewayProxyEvent, _context: Context): Promise => { + async (_event: APIGatewayProxyEvent, _context: Context): Promise => { // your code goes here here } ).use( From e34d1d9f358ef2e75eda7f885379ce3a874df125 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 11 Jul 2023 10:13:10 +0200 Subject: [PATCH 18/24] build(internal): bump semver from 5.7.1 to 5.7.2 (#1594) Bumps [semver](https://github.com/npm/node-semver) from 5.7.1 to 5.7.2. - [Release notes](https://github.com/npm/node-semver/releases) - [Changelog](https://github.com/npm/node-semver/blob/v5.7.2/CHANGELOG.md) - [Commits](https://github.com/npm/node-semver/compare/v5.7.1...v5.7.2) --- updated-dependencies: - dependency-name: semver dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- package-lock.json | 90 +++++++++++++++++++---------------------------- 1 file changed, 36 insertions(+), 54 deletions(-) diff --git a/package-lock.json b/package-lock.json index a859fa3a8a..07653ce722 100644 --- a/package-lock.json +++ b/package-lock.json @@ -2198,9 +2198,10 @@ "license": "MIT" }, "node_modules/@babel/core/node_modules/semver": { - "version": "6.3.0", + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "dev": true, - "license": "ISC", "bin": { "semver": "bin/semver.js" } @@ -2249,9 +2250,10 @@ } }, "node_modules/@babel/helper-compilation-targets/node_modules/semver": { - "version": "6.3.0", + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "dev": true, - "license": "ISC", "bin": { "semver": "bin/semver.js" } @@ -6447,8 +6449,9 @@ } }, "node_modules/aws-xray-sdk-core/node_modules/semver": { - "version": "5.7.1", - "license": "ISC", + "version": "5.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", + "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", "bin": { "semver": "bin/semver" } @@ -7390,8 +7393,9 @@ } }, "node_modules/cls-hooked/node_modules/semver": { - "version": "5.7.1", - "license": "ISC", + "version": "5.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", + "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", "bin": { "semver": "bin/semver" } @@ -7676,9 +7680,9 @@ } }, "node_modules/conventional-changelog-writer/node_modules/semver": { - "version": 
"6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "dev": true, "bin": { "semver": "bin/semver.js" @@ -9989,9 +9993,9 @@ } }, "node_modules/git-semver-tags/node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "dev": true, "bin": { "semver": "bin/semver.js" @@ -10995,9 +10999,10 @@ } }, "node_modules/istanbul-lib-instrument/node_modules/semver": { - "version": "6.3.0", + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "dev": true, - "license": "ISC", "bin": { "semver": "bin/semver.js" } @@ -12044,17 +12049,6 @@ "node": ">=8" } }, - "node_modules/lerna/node_modules/lru-cache": { - "version": "6.0.0", - "dev": true, - "license": "ISC", - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=10" - } - }, "node_modules/lerna/node_modules/minimatch": { "version": "3.0.5", "dev": true, @@ -12083,20 +12077,6 @@ "url": "https://github.com/sponsors/isaacs" } }, - "node_modules/lerna/node_modules/semver": { - "version": "7.3.8", - "dev": true, - "license": "ISC", - "dependencies": { - "lru-cache": "^6.0.0" - }, - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - 
"node": ">=10" - } - }, "node_modules/lerna/node_modules/uuid": { "version": "8.3.2", "dev": true, @@ -12669,9 +12649,10 @@ } }, "node_modules/make-dir/node_modules/semver": { - "version": "6.3.0", + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "dev": true, - "license": "ISC", "bin": { "semver": "bin/semver.js" } @@ -13137,9 +13118,9 @@ } }, "node_modules/meow/node_modules/read-pkg/node_modules/semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "version": "5.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", + "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", "dev": true, "bin": { "semver": "bin/semver" @@ -15761,9 +15742,9 @@ } }, "node_modules/read-pkg/node_modules/semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "version": "5.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", + "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", "dev": true, "bin": { "semver": "bin/semver" @@ -16109,9 +16090,10 @@ "license": "ISC" }, "node_modules/semver": { - "version": "7.3.7", + "version": "7.5.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", + "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", "dev": true, - "license": "ISC", "dependencies": { "lru-cache": "^6.0.0" }, @@ -17838,9 +17820,9 @@ } }, 
"node_modules/write-json-file/node_modules/semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "version": "5.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", + "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", "dev": true, "bin": { "semver": "bin/semver" From 0fc4f1ef56362417f10ed32a96a835eb1d3e561a Mon Sep 17 00:00:00 2001 From: Andrea Amorosi Date: Tue, 11 Jul 2023 10:55:31 +0200 Subject: [PATCH 19/24] chore(idempotency): mark the utility ready public beta (#1595) * chore(idempotency): mark utility as public beta * chore: manually increment version in commons --- .github/scripts/release_patch_package_json.js | 4 ++-- packages/commons/src/version.ts | 2 +- packages/idempotency/package.json | 6 ++++-- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/.github/scripts/release_patch_package_json.js b/.github/scripts/release_patch_package_json.js index 68d23adcc5..0108d3aa36 100644 --- a/.github/scripts/release_patch_package_json.js +++ b/.github/scripts/release_patch_package_json.js @@ -17,8 +17,8 @@ if (process.argv.length < 3) { } const basePath = resolve(process.argv[2]); const packageJsonPath = join(basePath, 'package.json'); -const alphaPackages = ['@aws-lambda-powertools/idempotency']; -const betaPackages = []; +const alphaPackages = []; +const betaPackages = ['@aws-lambda-powertools/idempotency']; (() => { try { diff --git a/packages/commons/src/version.ts b/packages/commons/src/version.ts index aab8359875..60a864ee4c 100644 --- a/packages/commons/src/version.ts +++ b/packages/commons/src/version.ts @@ -1,2 +1,2 @@ // this file is auto generated, do not modify -export const PT_VERSION = '1.11.0'; +export const PT_VERSION = '1.11.1'; diff --git a/packages/idempotency/package.json 
b/packages/idempotency/package.json index 706465d13d..20aa1c0958 100644 --- a/packages/idempotency/package.json +++ b/packages/idempotency/package.json @@ -6,7 +6,9 @@ "name": "Amazon Web Services", "url": "https://aws.amazon.com" }, - "private": true, + "publishConfig": { + "access": "public" + }, "scripts": { "test": "npm run test:unit", "test:unit": "jest --group=unit --detectOpenHandles --coverage --verbose", @@ -97,4 +99,4 @@ "aws-sdk-client-mock": "^2.2.0", "aws-sdk-client-mock-jest": "^2.2.0" } -} +} \ No newline at end of file From 5266d55fa1beab67c7ae99634e7d900574678143 Mon Sep 17 00:00:00 2001 From: Andrea Amorosi Date: Tue, 11 Jul 2023 15:15:32 +0200 Subject: [PATCH 20/24] docs(internal): update AWS SDK links to new docs (#1597) --- docs/utilities/idempotency.md | 4 ++-- docs/utilities/parameters.md | 26 +++++++++++++------------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/docs/utilities/idempotency.md b/docs/utilities/idempotency.md index 9341869b68..70cec2c1c1 100644 --- a/docs/utilities/idempotency.md +++ b/docs/utilities/idempotency.md @@ -753,9 +753,9 @@ This means that we will raise **`IdempotencyKeyError`** if the evaluation of **` } ``` -### Customizing boto configuration +### Customizing AWS SDK configuration -The **`clientConfig`** and **`awsSdkV3Client`** parameters enable you to pass in custom configurations or your own [DynamoDBClient](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-dynamodb/classes/dynamodbclient.html){target="_blank"} when constructing the persistence store. +The **`clientConfig`** and **`awsSdkV3Client`** parameters enable you to pass in custom configurations or your own [DynamoDBClient](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/client/dynamodb/){target="_blank"} when constructing the persistence store. 
=== "Passing specific configuration" diff --git a/docs/utilities/parameters.md b/docs/utilities/parameters.md index 75c3363ef9..8f98b07c08 100644 --- a/docs/utilities/parameters.md +++ b/docs/utilities/parameters.md @@ -19,7 +19,7 @@ The Parameters Utility helps to retrieve parameters from the System Manager Para ### Installation ???+ note - This utility supports **[AWS SDK for JavaScript v3](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/) only**. This allows the utility to be modular, and you to install only the SDK packages you need and keep your bundle size small. + This utility supports **[AWS SDK for JavaScript v3](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/){target="_blank"} only**. This allows the utility to be modular, and you to install only the SDK packages you need and keep your bundle size small. Depending on the provider you want to use, install the library and the corresponding AWS SDK package: @@ -367,18 +367,18 @@ You can use a special `sdkOptions` object argument to pass any supported option Here is the mapping between this utility's functions and methods and the underlying SDK: -| Provider | Function/Method | Client name | Function name | -| ------------------- | ------------------------------ | --------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| SSM Parameter Store | `getParameter` | `@aws-sdk/client-ssm` | [GetParameterCommand](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-ssm/classes/getparametercommand.html) | -| SSM Parameter Store | `getParameters` | `@aws-sdk/client-ssm` | 
[GetParametersByPathCommand](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-ssm/classes/getparametersbypathcommand.html) | -| SSM Parameter Store | `SSMProvider.get` | `@aws-sdk/client-ssm` | [GetParameterCommand](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-ssm/classes/getparametercommand.html) | -| SSM Parameter Store | `SSMProvider.getMultiple` | `@aws-sdk/client-ssm` | [GetParametersByPathCommand](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-ssm/classes/getparametersbypathcommand.html) | -| Secrets Manager | `getSecret` | `@aws-sdk/client-secrets-manager` | [GetSecretValueCommand](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-secrets-manager/classes/getsecretvaluecommand.html) | -| Secrets Manager | `SecretsProvider.get` | `@aws-sdk/client-secrets-manager` | [GetSecretValueCommand](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-secrets-manager/classes/getsecretvaluecommand.html) | -| AppConfig | `AppConfigProvider.get` | `@aws-sdk/client-appconfigdata` | [StartConfigurationSessionCommand](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-appconfigdata/classes/startconfigurationsessioncommand.html) & [GetLatestConfigurationCommand](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-appconfigdata/classes/getlatestconfigurationcommand.html) | -| AppConfig | `getAppConfig` | `@aws-sdk/client-appconfigdata` | [StartConfigurationSessionCommand](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-appconfigdata/classes/startconfigurationsessioncommand.html) & [GetLatestConfigurationCommand](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-appconfigdata/classes/getlatestconfigurationcommand.html) | -| DynamoDB | `DynamoDBProvider.get` | `@aws-sdk/client-dynamodb` | 
[GetItemCommand](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-dynamodb/classes/getitemcommand.html) | -| DynamoDB | `DynamoDBProvider.getMultiple` | `@aws-sdk/client-dynamodb` | [QueryCommand](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-dynamodb/classes/querycommand.html) | +| Provider | Function/Method | Client name | Function name | +| ------------------- | ------------------------------ | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| SSM Parameter Store | `getParameter` | `@aws-sdk/client-ssm` | [GetParameterCommand](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/client/ssm/command/GetParameterCommand/){target="_blank"} | +| SSM Parameter Store | `getParameters` | `@aws-sdk/client-ssm` | [GetParametersByPathCommand](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/client/ssm/command/GetParametersByPathCommand/){target="_blank"} | +| SSM Parameter Store | `SSMProvider.get` | `@aws-sdk/client-ssm` | [GetParameterCommand](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/client/ssm/command/GetParameterCommand/){target="_blank"} | +| SSM Parameter Store | `SSMProvider.getMultiple` | `@aws-sdk/client-ssm` | [GetParametersByPathCommand](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/client/ssm/command/GetParametersByPathCommand){target="_blank"} | +| Secrets Manager | `getSecret` | `@aws-sdk/client-secrets-manager` | [GetSecretValueCommand](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/client/secrets-manager/command/GetSecretValueCommand/){target="_blank"} | +| Secrets Manager | `SecretsProvider.get` | `@aws-sdk/client-secrets-manager` | 
[GetSecretValueCommand](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/client/secrets-manager/command/GetSecretValueCommand/){target="_blank"} | +| AppConfig | `AppConfigProvider.get` | `@aws-sdk/client-appconfigdata` | [StartConfigurationSessionCommand](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/client/appconfigdata/command/StartConfigurationSessionCommand/){target="_blank"} & [GetLatestConfigurationCommand](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/client/appconfigdata/command/GetLatestConfigurationCommand/){target="_blank"} | +| AppConfig | `getAppConfig` | `@aws-sdk/client-appconfigdata` | [StartConfigurationSessionCommand](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/client/appconfigdata/command/StartConfigurationSessionCommand/){target="_blank"} & [GetLatestConfigurationCommand](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/client/appconfigdata/command/GetLatestConfigurationCommand/){target="_blank"} | +| DynamoDB | `DynamoDBProvider.get` | `@aws-sdk/client-dynamodb` | [GetItemCommand](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/client/dynamodb/command/GetItemCommand/){target="_blank"} | +| DynamoDB | `DynamoDBProvider.getMultiple` | `@aws-sdk/client-dynamodb` | [QueryCommand](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/client/dynamodb/command/QueryCommand/){target="_blank"} | ### Bring your own AWS SDK v3 client From 16719c36dd541724eaab4ee4ff3fd1c5bb101d1b Mon Sep 17 00:00:00 2001 From: Alexander Schueren Date: Tue, 11 Jul 2023 15:32:23 +0200 Subject: [PATCH 21/24] chore(maintenance): remove parameters utility from layer bundling and layers e2e tests (#1599) * remove parameter from e2e tests * remove parameters from canary stack as well --- .github/scripts/setup_tmp_layer_files.sh | 3 +-- layers/src/canary-stack.ts | 4 +--- layers/tests/e2e/layerPublisher.class.test.functionCode.ts | 2 -- 3 files changed, 2 insertions(+), 7 deletions(-) diff --git 
a/.github/scripts/setup_tmp_layer_files.sh b/.github/scripts/setup_tmp_layer_files.sh index 9d6da9635a..77c5875734 100644 --- a/.github/scripts/setup_tmp_layer_files.sh +++ b/.github/scripts/setup_tmp_layer_files.sh @@ -6,8 +6,7 @@ npm init -y npm i \ @aws-lambda-powertools/logger@$VERSION \ @aws-lambda-powertools/metrics@$VERSION \ - @aws-lambda-powertools/tracer@$VERSION \ - @aws-lambda-powertools/parameters@$VERSION + @aws-lambda-powertools/tracer@$VERSION rm -rf node_modules/@types \ package.json \ package-lock.json diff --git a/layers/src/canary-stack.ts b/layers/src/canary-stack.ts index 4847af21d3..17f0af14f3 100644 --- a/layers/src/canary-stack.ts +++ b/layers/src/canary-stack.ts @@ -41,17 +41,15 @@ export class CanaryStack extends Stack { '../tests/e2e/layerPublisher.class.test.functionCode.ts' ), handler: 'handler', - runtime: Runtime.NODEJS_18_X, + runtime: Runtime.NODEJS_14_X, functionName: `canary-${suffix}`, timeout: Duration.seconds(30), bundling: { externalModules: [ // don't package these modules, we want to pull them from the layer - 'aws-sdk', '@aws-lambda-powertools/logger', '@aws-lambda-powertools/metrics', '@aws-lambda-powertools/tracer', - '@aws-lambda-powertools/parameters', '@aws-lambda-powertools/commons', ], }, diff --git a/layers/tests/e2e/layerPublisher.class.test.functionCode.ts b/layers/tests/e2e/layerPublisher.class.test.functionCode.ts index 28e60424e8..a2cdcd8d42 100644 --- a/layers/tests/e2e/layerPublisher.class.test.functionCode.ts +++ b/layers/tests/e2e/layerPublisher.class.test.functionCode.ts @@ -2,14 +2,12 @@ import { readFileSync } from 'node:fs'; import { Logger } from '@aws-lambda-powertools/logger'; import { Metrics } from '@aws-lambda-powertools/metrics'; import { Tracer } from '@aws-lambda-powertools/tracer'; -import { SSMProvider } from '@aws-lambda-powertools/parameters/ssm'; const logger = new Logger({ logLevel: 'DEBUG', }); const metrics = new Metrics(); const tracer = new Tracer(); -new SSMProvider(); export const 
handler = (): void => { // Check that the packages version matches the expected one From 3f03c5c0e16f0e25d09740d8ad69864f64868feb Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Tue, 11 Jul 2023 13:54:11 +0000 Subject: [PATCH 22/24] chore(release): v1.11.1 [skip ci] --- CHANGELOG.md | 7 ++++++ examples/cdk/CHANGELOG.md | 4 ++++ examples/cdk/package.json | 8 +++---- examples/sam/CHANGELOG.md | 4 ++++ examples/sam/package.json | 8 +++---- layers/CHANGELOG.md | 4 ++++ layers/package.json | 2 +- lerna.json | 2 +- package-lock.json | 40 +++++++++++++++---------------- packages/commons/CHANGELOG.md | 4 ++++ packages/commons/package.json | 6 ++--- packages/idempotency/CHANGELOG.md | 6 +++++ packages/idempotency/package.json | 6 ++--- packages/logger/CHANGELOG.md | 4 ++++ packages/logger/package.json | 4 ++-- packages/metrics/CHANGELOG.md | 4 ++++ packages/metrics/package.json | 4 ++-- packages/parameters/CHANGELOG.md | 4 ++++ packages/parameters/package.json | 4 ++-- packages/tracer/CHANGELOG.md | 4 ++++ packages/tracer/package.json | 4 ++-- 21 files changed, 89 insertions(+), 44 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 916fbb03dd..53a55d6ce7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,13 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. 
+## [1.11.1](https://github.com/aws-powertools/powertools-lambda-typescript/compare/v1.11.0...v1.11.1) (2023-07-11) + +### Bug Fixes + +- **docs:** fix alias in versions.json ([#1576](https://github.com/aws-powertools/powertools-lambda-typescript/issues/1576)) ([7198cbc](https://github.com/aws-powertools/powertools-lambda-typescript/commit/7198cbca28962e07486b90ecb4f265cafe28bf73)) +- **idempotency:** types, docs, and `makeIdempotent` function wrapper ([#1579](https://github.com/aws-powertools/powertools-lambda-typescript/issues/1579)) ([bba1c01](https://github.com/aws-powertools/powertools-lambda-typescript/commit/bba1c01a0b3f08e962568e1d0eb44d486829657b)) + # [1.11.0](https://github.com/aws-powertools/powertools-lambda-typescript/compare/v1.10.0...v1.11.0) (2023-06-29) ### Features diff --git a/examples/cdk/CHANGELOG.md b/examples/cdk/CHANGELOG.md index bccc27fff6..e4efad8067 100644 --- a/examples/cdk/CHANGELOG.md +++ b/examples/cdk/CHANGELOG.md @@ -3,6 +3,10 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. 
+## [1.11.1](https://github.com/aws-powertools/powertools-lambda-typescript/compare/v1.11.0...v1.11.1) (2023-07-11) + +**Note:** Version bump only for package cdk-sample + # [1.11.0](https://github.com/aws-powertools/powertools-lambda-typescript/compare/v1.10.0...v1.11.0) (2023-06-29) **Note:** Version bump only for package cdk-sample diff --git a/examples/cdk/package.json b/examples/cdk/package.json index 431a888c38..aba33bb7a1 100644 --- a/examples/cdk/package.json +++ b/examples/cdk/package.json @@ -1,6 +1,6 @@ { "name": "cdk-sample", - "version": "1.11.0", + "version": "1.11.1", "author": { "name": "Amazon Web Services", "url": "https://aws.amazon.com" @@ -25,9 +25,9 @@ "*.js": "npm run lint-fix" }, "devDependencies": { - "@aws-lambda-powertools/logger": "^1.11.0", - "@aws-lambda-powertools/metrics": "^1.11.0", - "@aws-lambda-powertools/tracer": "^1.11.0", + "@aws-lambda-powertools/logger": "^1.11.1", + "@aws-lambda-powertools/metrics": "^1.11.1", + "@aws-lambda-powertools/tracer": "^1.11.1", "@aws-sdk/lib-dynamodb": "^3.360.0", "@types/aws-lambda": "^8.10.109", "@types/jest": "^29.2.4", diff --git a/examples/sam/CHANGELOG.md b/examples/sam/CHANGELOG.md index d532527373..940b2e83c4 100644 --- a/examples/sam/CHANGELOG.md +++ b/examples/sam/CHANGELOG.md @@ -3,6 +3,10 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. 
+## [1.11.1](https://github.com/aws-powertools/powertools-lambda-typescript/compare/v1.11.0...v1.11.1) (2023-07-11) + +**Note:** Version bump only for package sam-example + # [1.11.0](https://github.com/aws-powertools/powertools-lambda-typescript/compare/v1.10.0...v1.11.0) (2023-06-29) **Note:** Version bump only for package sam-example diff --git a/examples/sam/package.json b/examples/sam/package.json index 8ea457e34e..65f00d0543 100644 --- a/examples/sam/package.json +++ b/examples/sam/package.json @@ -1,6 +1,6 @@ { "name": "sam-example", - "version": "1.11.0", + "version": "1.11.1", "author": { "name": "Amazon Web Services", "url": "https://aws.amazon.com" @@ -37,9 +37,9 @@ "typescript": "^4.9.4" }, "dependencies": { - "@aws-lambda-powertools/logger": "^1.11.0", - "@aws-lambda-powertools/metrics": "^1.11.0", - "@aws-lambda-powertools/tracer": "^1.11.0", + "@aws-lambda-powertools/logger": "^1.11.1", + "@aws-lambda-powertools/metrics": "^1.11.1", + "@aws-lambda-powertools/tracer": "^1.11.1", "@aws-sdk/client-dynamodb": "^3.360.0", "@aws-sdk/lib-dynamodb": "^3.360.0", "@middy/core": "^3.6.2", diff --git a/layers/CHANGELOG.md b/layers/CHANGELOG.md index 6bf943b6ba..ed53105c60 100644 --- a/layers/CHANGELOG.md +++ b/layers/CHANGELOG.md @@ -3,6 +3,10 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. 
+## [1.11.1](https://github.com/aws-powertools/powertools-lambda-typescript/compare/v1.11.0...v1.11.1) (2023-07-11) + +**Note:** Version bump only for package layers + # [1.11.0](https://github.com/aws-powertools/powertools-lambda-typescript/compare/v1.10.0...v1.11.0) (2023-06-29) **Note:** Version bump only for package layers diff --git a/layers/package.json b/layers/package.json index bec43d142a..a7b43a2954 100644 --- a/layers/package.json +++ b/layers/package.json @@ -1,6 +1,6 @@ { "name": "layers", - "version": "1.11.0", + "version": "1.11.1", "bin": { "layer": "bin/layers.js" }, diff --git a/lerna.json b/lerna.json index 03a17affa5..27a6315be8 100644 --- a/lerna.json +++ b/lerna.json @@ -10,7 +10,7 @@ "examples/sam", "layers" ], - "version": "1.11.0", + "version": "1.11.1", "npmClient": "npm", "message": "chore(release): %s [skip ci]" } diff --git a/package-lock.json b/package-lock.json index 07653ce722..d9d180d8ae 100644 --- a/package-lock.json +++ b/package-lock.json @@ -284,7 +284,7 @@ }, "examples/cdk": { "name": "cdk-sample", - "version": "1.11.0", + "version": "1.11.1", "license": "MIT-0", "dependencies": { "@middy/core": "^3.6.2", @@ -297,9 +297,9 @@ "cdk-app": "bin/cdk-app.js" }, "devDependencies": { - "@aws-lambda-powertools/logger": "^1.11.0", - "@aws-lambda-powertools/metrics": "^1.11.0", - "@aws-lambda-powertools/tracer": "^1.11.0", + "@aws-lambda-powertools/logger": "^1.11.1", + "@aws-lambda-powertools/metrics": "^1.11.1", + "@aws-lambda-powertools/tracer": "^1.11.1", "@aws-sdk/lib-dynamodb": "^3.360.0", "@types/aws-lambda": "^8.10.109", "@types/jest": "^29.2.4", @@ -334,12 +334,12 @@ }, "examples/sam": { "name": "sam-example", - "version": "1.11.0", + "version": "1.11.1", "license": "MIT-0", "dependencies": { - "@aws-lambda-powertools/logger": "^1.11.0", - "@aws-lambda-powertools/metrics": "^1.11.0", - "@aws-lambda-powertools/tracer": "^1.11.0", + "@aws-lambda-powertools/logger": "^1.11.1", + "@aws-lambda-powertools/metrics": 
"^1.11.1", + "@aws-lambda-powertools/tracer": "^1.11.1", "@aws-sdk/client-dynamodb": "^3.360.0", "@aws-sdk/lib-dynamodb": "^3.360.0", "@middy/core": "^3.6.2", @@ -369,7 +369,7 @@ "dev": true }, "layers": { - "version": "1.11.0", + "version": "1.11.1", "license": "MIT-0", "bin": { "layer": "bin/layers.js" @@ -17985,7 +17985,7 @@ }, "packages/commons": { "name": "@aws-lambda-powertools/commons", - "version": "1.11.0", + "version": "1.11.1", "license": "MIT-0", "devDependencies": { "@aws-sdk/client-appconfigdata": "^3.360.0", @@ -17998,10 +17998,10 @@ }, "packages/idempotency": { "name": "@aws-lambda-powertools/idempotency", - "version": "1.11.0", + "version": "1.11.1", "license": "MIT-0", "dependencies": { - "@aws-lambda-powertools/commons": "^1.11.0", + "@aws-lambda-powertools/commons": "^1.11.1", "@aws-sdk/lib-dynamodb": "^3.231.0", "@aws-sdk/util-base64-node": "^3.209.0", "jmespath": "^0.16.0" @@ -18218,10 +18218,10 @@ }, "packages/logger": { "name": "@aws-lambda-powertools/logger", - "version": "1.11.0", + "version": "1.11.1", "license": "MIT-0", "dependencies": { - "@aws-lambda-powertools/commons": "^1.11.0", + "@aws-lambda-powertools/commons": "^1.11.1", "lodash.merge": "^4.6.2" }, "devDependencies": { @@ -18230,10 +18230,10 @@ }, "packages/metrics": { "name": "@aws-lambda-powertools/metrics", - "version": "1.11.0", + "version": "1.11.1", "license": "MIT-0", "dependencies": { - "@aws-lambda-powertools/commons": "^1.11.0" + "@aws-lambda-powertools/commons": "^1.11.1" }, "devDependencies": { "@aws-sdk/client-cloudwatch": "^3.360.0", @@ -18243,10 +18243,10 @@ }, "packages/parameters": { "name": "@aws-lambda-powertools/parameters", - "version": "1.11.0", + "version": "1.11.1", "license": "MIT-0", "dependencies": { - "@aws-lambda-powertools/commons": "^1.11.0", + "@aws-lambda-powertools/commons": "^1.11.1", "@aws-sdk/util-base64-node": "^3.209.0" }, "devDependencies": { @@ -18464,10 +18464,10 @@ }, "packages/tracer": { "name": "@aws-lambda-powertools/tracer", - 
"version": "1.11.0", + "version": "1.11.1", "license": "MIT-0", "dependencies": { - "@aws-lambda-powertools/commons": "^1.11.0", + "@aws-lambda-powertools/commons": "^1.11.1", "aws-xray-sdk-core": "^3.4.1" }, "devDependencies": { diff --git a/packages/commons/CHANGELOG.md b/packages/commons/CHANGELOG.md index fd912a0b86..5feb285227 100644 --- a/packages/commons/CHANGELOG.md +++ b/packages/commons/CHANGELOG.md @@ -3,6 +3,10 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. +## [1.11.1](https://github.com/aws-powertools/powertools-lambda-typescript/compare/v1.11.0...v1.11.1) (2023-07-11) + +**Note:** Version bump only for package @aws-lambda-powertools/commons + # [1.11.0](https://github.com/aws-powertools/powertools-lambda-typescript/compare/v1.10.0...v1.11.0) (2023-06-29) **Note:** Version bump only for package @aws-lambda-powertools/commons diff --git a/packages/commons/package.json b/packages/commons/package.json index 1d058b12e3..7830b218bc 100644 --- a/packages/commons/package.json +++ b/packages/commons/package.json @@ -1,6 +1,6 @@ { "name": "@aws-lambda-powertools/commons", - "version": "1.11.0", + "version": "1.11.1", "description": "A shared utility package for Powertools for AWS Lambda (TypeScript) libraries", "author": { "name": "Amazon Web Services", @@ -47,9 +47,9 @@ "nodejs" ], "devDependencies": { - "@aws-sdk/client-lambda": "^3.360.0", - "@aws-sdk/client-dynamodb": "^3.360.0", "@aws-sdk/client-appconfigdata": "^3.360.0", + "@aws-sdk/client-dynamodb": "^3.360.0", + "@aws-sdk/client-lambda": "^3.360.0", "@aws-sdk/client-secrets-manager": "^3.360.0", "@aws-sdk/client-ssm": "^3.360.0", "@aws-sdk/util-utf8-node": "^3.259.0" diff --git a/packages/idempotency/CHANGELOG.md b/packages/idempotency/CHANGELOG.md index 9fada5a632..67a7488b19 100644 --- a/packages/idempotency/CHANGELOG.md +++ b/packages/idempotency/CHANGELOG.md @@ -3,6 +3,12 
@@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. +## [1.11.1](https://github.com/aws-powertools/powertools-lambda-typescript/compare/v1.11.0...v1.11.1) (2023-07-11) + +### Bug Fixes + +- **idempotency:** types, docs, and `makeIdempotent` function wrapper ([#1579](https://github.com/aws-powertools/powertools-lambda-typescript/issues/1579)) ([bba1c01](https://github.com/aws-powertools/powertools-lambda-typescript/commit/bba1c01a0b3f08e962568e1d0eb44d486829657b)) + # [1.11.0](https://github.com/aws-powertools/powertools-lambda-typescript/compare/v1.10.0...v1.11.0) (2023-06-29) ### Features diff --git a/packages/idempotency/package.json b/packages/idempotency/package.json index 20aa1c0958..cdd66eb073 100644 --- a/packages/idempotency/package.json +++ b/packages/idempotency/package.json @@ -1,6 +1,6 @@ { "name": "@aws-lambda-powertools/idempotency", - "version": "1.11.0", + "version": "1.11.1", "description": "The idempotency package for the Powertools for AWS Lambda (TypeScript) library. It provides options to make your Lambda functions idempotent and safe to retry.", "author": { "name": "Amazon Web Services", @@ -80,7 +80,7 @@ "url": "https://github.com/aws-powertools/powertools-lambda-typescript/issues" }, "dependencies": { - "@aws-lambda-powertools/commons": "^1.11.0", + "@aws-lambda-powertools/commons": "^1.11.1", "@aws-sdk/lib-dynamodb": "^3.231.0", "@aws-sdk/util-base64-node": "^3.209.0", "jmespath": "^0.16.0" @@ -99,4 +99,4 @@ "aws-sdk-client-mock": "^2.2.0", "aws-sdk-client-mock-jest": "^2.2.0" } -} \ No newline at end of file +} diff --git a/packages/logger/CHANGELOG.md b/packages/logger/CHANGELOG.md index 214db969c0..f43ac30a1b 100644 --- a/packages/logger/CHANGELOG.md +++ b/packages/logger/CHANGELOG.md @@ -3,6 +3,10 @@ All notable changes to this project will be documented in this file. 
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. +## [1.11.1](https://github.com/aws-powertools/powertools-lambda-typescript/compare/v1.11.0...v1.11.1) (2023-07-11) + +**Note:** Version bump only for package @aws-lambda-powertools/logger + # [1.11.0](https://github.com/aws-powertools/powertools-lambda-typescript/compare/v1.10.0...v1.11.0) (2023-06-29) **Note:** Version bump only for package @aws-lambda-powertools/logger diff --git a/packages/logger/package.json b/packages/logger/package.json index fe3ba823fb..77bc0399e3 100644 --- a/packages/logger/package.json +++ b/packages/logger/package.json @@ -1,6 +1,6 @@ { "name": "@aws-lambda-powertools/logger", - "version": "1.11.0", + "version": "1.11.1", "description": "The logging package for the Powertools for AWS Lambda (TypeScript) library", "author": { "name": "Amazon Web Services", @@ -45,7 +45,7 @@ "url": "https://github.com/aws-powertools/powertools-lambda-typescript/issues" }, "dependencies": { - "@aws-lambda-powertools/commons": "^1.11.0", + "@aws-lambda-powertools/commons": "^1.11.1", "lodash.merge": "^4.6.2" }, "keywords": [ diff --git a/packages/metrics/CHANGELOG.md b/packages/metrics/CHANGELOG.md index 56d75ac490..847089eba3 100644 --- a/packages/metrics/CHANGELOG.md +++ b/packages/metrics/CHANGELOG.md @@ -3,6 +3,10 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. 
+## [1.11.1](https://github.com/aws-powertools/powertools-lambda-typescript/compare/v1.11.0...v1.11.1) (2023-07-11) + +**Note:** Version bump only for package @aws-lambda-powertools/metrics + # [1.11.0](https://github.com/aws-powertools/powertools-lambda-typescript/compare/v1.10.0...v1.11.0) (2023-06-29) **Note:** Version bump only for package @aws-lambda-powertools/metrics diff --git a/packages/metrics/package.json b/packages/metrics/package.json index 89871fbd83..195c2d9635 100644 --- a/packages/metrics/package.json +++ b/packages/metrics/package.json @@ -1,6 +1,6 @@ { "name": "@aws-lambda-powertools/metrics", - "version": "1.11.0", + "version": "1.11.1", "description": "The metrics package for the Powertools for AWS Lambda (TypeScript) library", "author": { "name": "Amazon Web Services", @@ -47,7 +47,7 @@ "url": "https://github.com/aws-powertools/powertools-lambda-typescript/issues" }, "dependencies": { - "@aws-lambda-powertools/commons": "^1.11.0" + "@aws-lambda-powertools/commons": "^1.11.1" }, "keywords": [ "aws", diff --git a/packages/parameters/CHANGELOG.md b/packages/parameters/CHANGELOG.md index 46731a556d..9c5c8d8f05 100644 --- a/packages/parameters/CHANGELOG.md +++ b/packages/parameters/CHANGELOG.md @@ -3,6 +3,10 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. 
+## [1.11.1](https://github.com/aws-powertools/powertools-lambda-typescript/compare/v1.11.0...v1.11.1) (2023-07-11) + +**Note:** Version bump only for package @aws-lambda-powertools/parameters + # [1.11.0](https://github.com/aws-powertools/powertools-lambda-typescript/compare/v1.10.0...v1.11.0) (2023-06-29) **Note:** Version bump only for package @aws-lambda-powertools/parameters diff --git a/packages/parameters/package.json b/packages/parameters/package.json index 86e0fb927c..6f65015f98 100644 --- a/packages/parameters/package.json +++ b/packages/parameters/package.json @@ -1,6 +1,6 @@ { "name": "@aws-lambda-powertools/parameters", - "version": "1.11.0", + "version": "1.11.1", "description": "The parameters package for the Powertools for AWS Lambda (TypeScript) library", "author": { "name": "Amazon Web Services", @@ -147,7 +147,7 @@ "aws-sdk-client-mock-jest": "^2.2.0" }, "dependencies": { - "@aws-lambda-powertools/commons": "^1.11.0", + "@aws-lambda-powertools/commons": "^1.11.1", "@aws-sdk/util-base64-node": "^3.209.0" } } diff --git a/packages/tracer/CHANGELOG.md b/packages/tracer/CHANGELOG.md index 275924898c..3c70c2dad8 100644 --- a/packages/tracer/CHANGELOG.md +++ b/packages/tracer/CHANGELOG.md @@ -3,6 +3,10 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. 
+## [1.11.1](https://github.com/aws-powertools/powertools-lambda-typescript/compare/v1.11.0...v1.11.1) (2023-07-11) + +**Note:** Version bump only for package @aws-lambda-powertools/tracer + # [1.11.0](https://github.com/aws-powertools/powertools-lambda-typescript/compare/v1.10.0...v1.11.0) (2023-06-29) **Note:** Version bump only for package @aws-lambda-powertools/tracer diff --git a/packages/tracer/package.json b/packages/tracer/package.json index a071a5b426..d3c98ff426 100644 --- a/packages/tracer/package.json +++ b/packages/tracer/package.json @@ -1,6 +1,6 @@ { "name": "@aws-lambda-powertools/tracer", - "version": "1.11.0", + "version": "1.11.1", "description": "The tracer package for the Powertools for AWS Lambda (TypeScript) library", "author": { "name": "Amazon Web Services", @@ -51,7 +51,7 @@ "url": "https://github.com/aws-powertools/powertools-lambda-typescript/issues" }, "dependencies": { - "@aws-lambda-powertools/commons": "^1.11.0", + "@aws-lambda-powertools/commons": "^1.11.1", "aws-xray-sdk-core": "^3.4.1" }, "keywords": [ From 3251b9c7c90e21e6ac46bcf72f1c28116a3f2620 Mon Sep 17 00:00:00 2001 From: Alexander Schueren Date: Tue, 11 Jul 2023 16:31:17 +0200 Subject: [PATCH 23/24] fix canary deploy in ci with correct workspace name (#1601) --- .github/workflows/reusable_deploy_layer_stack.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/reusable_deploy_layer_stack.yml b/.github/workflows/reusable_deploy_layer_stack.yml index e044615f1c..27521fc887 100644 --- a/.github/workflows/reusable_deploy_layer_stack.yml +++ b/.github/workflows/reusable_deploy_layer_stack.yml @@ -95,7 +95,7 @@ jobs: if-no-files-found: error retention-days: 1 - name: CDK deploy canary - run: npm run cdk -w layer -- deploy --app cdk.out --context region=${{ matrix.region }} 'CanaryStack' --require-approval never --verbose --outputs-file cdk-outputs.json + run: npm run cdk -w layers -- deploy --app cdk.out --context 
region=${{ matrix.region }} 'CanaryStack' --require-approval never --verbose --outputs-file cdk-outputs.json update_layer_arn_docs: needs: deploy-cdk-stack permissions: From 7cfac60bd5f597348c3dbb6a1aebdbf22a0fed3d Mon Sep 17 00:00:00 2001 From: "Release bot[bot]" Date: Tue, 11 Jul 2023 14:43:15 +0000 Subject: [PATCH 24/24] chore: update layer ARN on documentation --- docs/index.md | 68 +++++++++++++++++++++++++-------------------------- 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/docs/index.md b/docs/index.md index 5f599adfe4..6378415085 100644 --- a/docs/index.md +++ b/docs/index.md @@ -26,7 +26,7 @@ You can use Powertools for AWS Lambda (TypeScript) in both TypeScript and JavaSc You can install Powertools for AWS Lambda (TypeScript) using one of the following options: -* **Lambda Layer**: [**arn:aws:lambda:{region}:094274105915:layer:AWSLambdaPowertoolsTypeScript:15**](#){: .copyMe}:clipboard: +* **Lambda Layer**: [**arn:aws:lambda:{region}:094274105915:layer:AWSLambdaPowertoolsTypeScript:16**](#){: .copyMe}:clipboard: * **npm**: [`npm install @aws-lambda-powertools/tracer @aws-lambda-powertools/metrics @aws-lambda-powertools/logger`](#){: .copyMe}:clipboard: ### Lambda Layer @@ -41,31 +41,31 @@ You can include Powertools for AWS Lambda (TypeScript) Lambda Layer using [AWS L | Region | Layer ARN | | ---------------- | ------------------------------------------------------------------------------------------------------------ | - | `us-east-1` | [arn:aws:lambda:us-east-1:094274105915:layer:AWSLambdaPowertoolsTypeScript:15](#){: .copyMe}:clipboard: | - | `us-east-2` | [arn:aws:lambda:us-east-2:094274105915:layer:AWSLambdaPowertoolsTypeScript:15](#){: .copyMe}:clipboard: | - | `us-west-1` | [arn:aws:lambda:us-west-1:094274105915:layer:AWSLambdaPowertoolsTypeScript:15](#){: .copyMe}:clipboard: | - | `us-west-2` | [arn:aws:lambda:us-west-2:094274105915:layer:AWSLambdaPowertoolsTypeScript:15](#){: .copyMe}:clipboard: | - | `ap-south-1` | 
[arn:aws:lambda:ap-south-1:094274105915:layer:AWSLambdaPowertoolsTypeScript:15](#){: .copyMe}:clipboard: | - | `ap-east-1` | [arn:aws:lambda:ap-east-1:094274105915:layer:AWSLambdaPowertoolsTypeScript:15](#){: .copyMe}:clipboard: | - | `ap-northeast-1` | [arn:aws:lambda:ap-northeast-1:094274105915:layer:AWSLambdaPowertoolsTypeScript:15](#){: .copyMe}:clipboard: | - | `ap-northeast-2` | [arn:aws:lambda:ap-northeast-2:094274105915:layer:AWSLambdaPowertoolsTypeScript:15](#){: .copyMe}:clipboard: | - | `ap-northeast-3` | [arn:aws:lambda:ap-northeast-3:094274105915:layer:AWSLambdaPowertoolsTypeScript:15](#){: .copyMe}:clipboard: | - | `ap-southeast-1` | [arn:aws:lambda:ap-southeast-1:094274105915:layer:AWSLambdaPowertoolsTypeScript:15](#){: .copyMe}:clipboard: | - | `ap-southeast-2` | [arn:aws:lambda:ap-southeast-2:094274105915:layer:AWSLambdaPowertoolsTypeScript:15](#){: .copyMe}:clipboard: | - | `ap-southeast-3` | [arn:aws:lambda:ap-southeast-3:094274105915:layer:AWSLambdaPowertoolsTypeScript:15](#){: .copyMe}:clipboard: | - | `ap-southeast-4` | [arn:aws:lambda:ap-southeast-4:094274105915:layer:AWSLambdaPowertoolsTypeScript:15](#){: .copyMe}:clipboard: | - | `eu-central-1` | [arn:aws:lambda:eu-central-1:094274105915:layer:AWSLambdaPowertoolsTypeScript:15](#){: .copyMe}:clipboard: | - | `eu-central-2` | [arn:aws:lambda:eu-central-1:094274105915:layer:AWSLambdaPowertoolsTypeScript:15](#){: .copyMe}:clipboard: | - | `eu-west-1` | [arn:aws:lambda:eu-west-1:094274105915:layer:AWSLambdaPowertoolsTypeScript:15](#){: .copyMe}:clipboard: | - | `eu-west-2` | [arn:aws:lambda:eu-west-2:094274105915:layer:AWSLambdaPowertoolsTypeScript:15](#){: .copyMe}:clipboard: | - | `eu-west-3` | [arn:aws:lambda:eu-west-3:094274105915:layer:AWSLambdaPowertoolsTypeScript:15](#){: .copyMe}:clipboard: | - | `eu-north-1` | [arn:aws:lambda:eu-north-1:094274105915:layer:AWSLambdaPowertoolsTypeScript:15](#){: .copyMe}:clipboard: | - | `eu-south-1` | 
[arn:aws:lambda:eu-south-1:094274105915:layer:AWSLambdaPowertoolsTypeScript:15](#){: .copyMe}:clipboard: | - | `eu-south-2` | [arn:aws:lambda:eu-south-2:094274105915:layer:AWSLambdaPowertoolsTypeScript:15](#){: .copyMe}:clipboard: | - | `ca-central-1` | [arn:aws:lambda:ca-central-1:094274105915:layer:AWSLambdaPowertoolsTypeScript:15](#){: .copyMe}:clipboard: | - | `sa-east-1` | [arn:aws:lambda:sa-east-1:094274105915:layer:AWSLambdaPowertoolsTypeScript:15](#){: .copyMe}:clipboard: | - | `af-south-1` | [arn:aws:lambda:af-south-1:094274105915:layer:AWSLambdaPowertoolsTypeScript:15](#){: .copyMe}:clipboard: | - | `me-south-1` | [arn:aws:lambda:me-south-1:094274105915:layer:AWSLambdaPowertoolsTypeScript:15](#){: .copyMe}:clipboard: | + | `us-east-1` | [arn:aws:lambda:us-east-1:094274105915:layer:AWSLambdaPowertoolsTypeScript:16](#){: .copyMe}:clipboard: | + | `us-east-2` | [arn:aws:lambda:us-east-2:094274105915:layer:AWSLambdaPowertoolsTypeScript:16](#){: .copyMe}:clipboard: | + | `us-west-1` | [arn:aws:lambda:us-west-1:094274105915:layer:AWSLambdaPowertoolsTypeScript:16](#){: .copyMe}:clipboard: | + | `us-west-2` | [arn:aws:lambda:us-west-2:094274105915:layer:AWSLambdaPowertoolsTypeScript:16](#){: .copyMe}:clipboard: | + | `ap-south-1` | [arn:aws:lambda:ap-south-1:094274105915:layer:AWSLambdaPowertoolsTypeScript:16](#){: .copyMe}:clipboard: | + | `ap-east-1` | [arn:aws:lambda:ap-east-1:094274105915:layer:AWSLambdaPowertoolsTypeScript:16](#){: .copyMe}:clipboard: | + | `ap-northeast-1` | [arn:aws:lambda:ap-northeast-1:094274105915:layer:AWSLambdaPowertoolsTypeScript:16](#){: .copyMe}:clipboard: | + | `ap-northeast-2` | [arn:aws:lambda:ap-northeast-2:094274105915:layer:AWSLambdaPowertoolsTypeScript:16](#){: .copyMe}:clipboard: | + | `ap-northeast-3` | [arn:aws:lambda:ap-northeast-3:094274105915:layer:AWSLambdaPowertoolsTypeScript:16](#){: .copyMe}:clipboard: | + | `ap-southeast-1` | [arn:aws:lambda:ap-southeast-1:094274105915:layer:AWSLambdaPowertoolsTypeScript:16](#){: 
.copyMe}:clipboard: | + | `ap-southeast-2` | [arn:aws:lambda:ap-southeast-2:094274105915:layer:AWSLambdaPowertoolsTypeScript:16](#){: .copyMe}:clipboard: | + | `ap-southeast-3` | [arn:aws:lambda:ap-southeast-3:094274105915:layer:AWSLambdaPowertoolsTypeScript:16](#){: .copyMe}:clipboard: | + | `ap-southeast-4` | [arn:aws:lambda:ap-southeast-4:094274105915:layer:AWSLambdaPowertoolsTypeScript:16](#){: .copyMe}:clipboard: | + | `eu-central-1` | [arn:aws:lambda:eu-central-1:094274105915:layer:AWSLambdaPowertoolsTypeScript:16](#){: .copyMe}:clipboard: | + | `eu-central-2` | [arn:aws:lambda:eu-central-1:094274105915:layer:AWSLambdaPowertoolsTypeScript:16](#){: .copyMe}:clipboard: | + | `eu-west-1` | [arn:aws:lambda:eu-west-1:094274105915:layer:AWSLambdaPowertoolsTypeScript:16](#){: .copyMe}:clipboard: | + | `eu-west-2` | [arn:aws:lambda:eu-west-2:094274105915:layer:AWSLambdaPowertoolsTypeScript:16](#){: .copyMe}:clipboard: | + | `eu-west-3` | [arn:aws:lambda:eu-west-3:094274105915:layer:AWSLambdaPowertoolsTypeScript:16](#){: .copyMe}:clipboard: | + | `eu-north-1` | [arn:aws:lambda:eu-north-1:094274105915:layer:AWSLambdaPowertoolsTypeScript:16](#){: .copyMe}:clipboard: | + | `eu-south-1` | [arn:aws:lambda:eu-south-1:094274105915:layer:AWSLambdaPowertoolsTypeScript:16](#){: .copyMe}:clipboard: | + | `eu-south-2` | [arn:aws:lambda:eu-south-2:094274105915:layer:AWSLambdaPowertoolsTypeScript:16](#){: .copyMe}:clipboard: | + | `ca-central-1` | [arn:aws:lambda:ca-central-1:094274105915:layer:AWSLambdaPowertoolsTypeScript:16](#){: .copyMe}:clipboard: | + | `sa-east-1` | [arn:aws:lambda:sa-east-1:094274105915:layer:AWSLambdaPowertoolsTypeScript:16](#){: .copyMe}:clipboard: | + | `af-south-1` | [arn:aws:lambda:af-south-1:094274105915:layer:AWSLambdaPowertoolsTypeScript:16](#){: .copyMe}:clipboard: | + | `me-south-1` | [arn:aws:lambda:me-south-1:094274105915:layer:AWSLambdaPowertoolsTypeScript:16](#){: .copyMe}:clipboard: | ??? 
note "Click to expand and copy code snippets for popular frameworks" @@ -76,7 +76,7 @@ You can include Powertools for AWS Lambda (TypeScript) Lambda Layer using [AWS L Type: AWS::Serverless::Function Properties: Layers: - - !Sub arn:aws:lambda:${AWS::Region}:094274105915:layer:AWSLambdaPowertoolsTypeScript:15 + - !Sub arn:aws:lambda:${AWS::Region}:094274105915:layer:AWSLambdaPowertoolsTypeScript:16 ``` If you use `esbuild` to bundle your code, make sure to exclude `@aws-lambda-powertools` from being bundled since the packages will be already present the Layer: @@ -107,7 +107,7 @@ You can include Powertools for AWS Lambda (TypeScript) Lambda Layer using [AWS L hello: handler: lambda_function.lambda_handler layers: - - arn:aws:lambda:${aws::region}:094274105915:layer:AWSLambdaPowertoolsTypeScript:15 + - arn:aws:lambda:${aws::region}:094274105915:layer:AWSLambdaPowertoolsTypeScript:16 ``` If you use `esbuild` to bundle your code, make sure to exclude `@aws-lambda-powertools` from being bundled since the packages will be already present the Layer: @@ -139,7 +139,7 @@ You can include Powertools for AWS Lambda (TypeScript) Lambda Layer using [AWS L const powertoolsLayer = lambda.LayerVersion.fromLayerVersionArn( this, 'PowertoolsLayer', - `arn:aws:lambda:${cdk.Stack.of(this).region}:094274105915:layer:AWSLambdaPowertoolsTypeScript:15` + `arn:aws:lambda:${cdk.Stack.of(this).region}:094274105915:layer:AWSLambdaPowertoolsTypeScript:16` ); new lambda.Function(this, 'Function', { @@ -191,7 +191,7 @@ You can include Powertools for AWS Lambda (TypeScript) Lambda Layer using [AWS L role = ... 
handler = "index.handler" runtime = "nodejs16.x" - layers = ["arn:aws:lambda:{aws::region}:094274105915:layer:AWSLambdaPowertoolsTypeScript:15"] + layers = ["arn:aws:lambda:{aws::region}:094274105915:layer:AWSLambdaPowertoolsTypeScript:16"] source_code_hash = filebase64sha256("lambda_function_payload.zip") } ``` @@ -209,7 +209,7 @@ You can include Powertools for AWS Lambda (TypeScript) Lambda Layer using [AWS L const lambdaFunction = new aws.lambda.Function('function', { layers: [ - pulumi.interpolate`arn:aws:lambda:${aws.getRegionOutput().name}:094274105915:layer:AWSLambdaPowertoolsTypeScript:15` + pulumi.interpolate`arn:aws:lambda:${aws.getRegionOutput().name}:094274105915:layer:AWSLambdaPowertoolsTypeScript:16` ], code: new pulumi.asset.FileArchive('lambda_function_payload.zip'), tracingConfig: { @@ -233,7 +233,7 @@ You can include Powertools for AWS Lambda (TypeScript) Lambda Layer using [AWS L ? Do you want to configure advanced settings? Yes ... ? Do you want to enable Lambda layers for this function? Yes - ? Enter up to 5 existing Lambda layer ARNs (comma-separated): arn:aws:lambda:{aws::region}:094274105915:layer:AWSLambdaPowertoolsTypeScript:15 + ? Enter up to 5 existing Lambda layer ARNs (comma-separated): arn:aws:lambda:{aws::region}:094274105915:layer:AWSLambdaPowertoolsTypeScript:16 ❯ amplify push -y # Updating an existing function and add the layer @@ -243,7 +243,7 @@ You can include Powertools for AWS Lambda (TypeScript) Lambda Layer using [AWS L - Name: ? Which setting do you want to update? Lambda layers configuration ? Do you want to enable Lambda layers for this function? Yes - ? Enter up to 5 existing Lambda layer ARNs (comma-separated): arn:aws:lambda:{aws::region}:094274105915:layer:AWSLambdaPowertoolsTypeScript:15 + ? Enter up to 5 existing Lambda layer ARNs (comma-separated): arn:aws:lambda:{aws::region}:094274105915:layer:AWSLambdaPowertoolsTypeScript:16 ? Do you want to edit the local lambda function now? 
No ``` @@ -253,7 +253,7 @@ You can include Powertools for AWS Lambda (TypeScript) Lambda Layer using [AWS L Change {region} to your AWS region, e.g. `eu-west-1` ```bash title="AWS CLI" - aws lambda get-layer-version-by-arn --arn arn:aws:lambda:{aws::region}:094274105915:layer:AWSLambdaPowertoolsTypeScript:15 --region {region} + aws lambda get-layer-version-by-arn --arn arn:aws:lambda:{aws::region}:094274105915:layer:AWSLambdaPowertoolsTypeScript:16 --region {region} ``` The pre-signed URL to download this Lambda Layer will be within `Location` key.