Control the maximum number of events in a single logRequest to be 1000 in Performance Monitoring #3896

Merged
merged 5 commits on Dec 8, 2020
6 changes: 6 additions & 0 deletions .changeset/snake-ten-some.md
@@ -0,0 +1,6 @@
---
'@firebase/performance': patch
'firebase': patch
---

Dispatch up to 1000 events for each network request when collecting performance events.
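
To make the behavior concrete, here is a minimal, self-contained TypeScript sketch of the capped batching described above. It is not the SDK's transport code: QueuedEvent, dispatchQueueEvents and send are illustrative names, and the real service also handles scheduling, serialization and retries.

// Illustrative sketch only: cap a single dispatch at 1000 queued events.
const MAX_EVENT_COUNT_PER_REQUEST = 1000;

interface QueuedEvent {
  message: string;
  eventTime: number;
}

const queue: QueuedEvent[] = [];

function dispatchQueueEvents(send: (batch: QueuedEvent[]) => void): void {
  // Take at most 1000 events off the front of the queue; whatever remains
  // stays queued and is picked up by the next scheduled dispatch.
  const staged = queue.splice(0, MAX_EVENT_COUNT_PER_REQUEST);
  if (staged.length > 0) {
    send(staged);
  }
}

// 1020 queued events end up in two requests: 1000 events, then 20 events.
for (let i = 0; i < 1020; i++) {
  queue.push({ message: 'event' + i, eventTime: Date.now() });
}
dispatchQueueEvents(batch => console.log('request with', batch.length, 'events')); // 1000
dispatchQueueEvents(batch => console.log('request with', batch.length, 'events')); // 20
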
108 changes: 89 additions & 19 deletions packages-exp/performance-exp/src/services/transport_service.test.ts
@@ -31,6 +31,8 @@ describe('Firebase Performance > transport_service', () => {
let fetchStub: SinonStub<[RequestInfo, RequestInit?], Promise<Response>>;
const INITIAL_SEND_TIME_DELAY_MS = 5.5 * 1000;
const DEFAULT_SEND_INTERVAL_MS = 10 * 1000;
const MAX_EVENT_COUNT_PER_REQUEST = 1000;
const TRANSPORT_DELAY_INTERVAL = 10000;
// Starts date at timestamp 1 instead of 0, otherwise it causes validation errors.
let clock: SinonFakeTimers;
const testTransportHandler = transportHandler((...args) => {
@@ -55,7 +57,7 @@ describe('Firebase Performance > transport_service', () => {
}).to.throw;
});

it('does not attempt to log an event to cc after INITIAL_SEND_TIME_DELAY_MS if queue is empty', () => {
it('does not attempt to log an event after INITIAL_SEND_TIME_DELAY_MS if queue is empty', () => {
fetchStub.resolves(
new Response('', {
status: 200,
@@ -67,7 +69,7 @@
expect(fetchStub).to.not.have.been.called;
});

it('attempts to log an event to cc after DEFAULT_SEND_INTERVAL_MS if queue not empty', () => {
it('attempts to log an event after DEFAULT_SEND_INTERVAL_MS if queue not empty', async () => {
fetchStub.resolves(
new Response('', {
status: 200,
@@ -82,32 +84,100 @@
});

it('successfully sends a message to transport', () => {
const transportDelayInterval = 30000;
const setting = SettingsService.getInstance();
const flTransportFullUrl =
setting.flTransportEndpointUrl + '?key=' + setting.transportKey;
fetchStub.withArgs(flTransportFullUrl, match.any).resolves(
// DELETE_REQUEST means event dispatch is successful.
new Response(
'{\
"nextRequestWaitMillis": "' +
transportDelayInterval +
'",\
"logResponseDetails": [\
{\
"responseAction": "DELETE_REQUEST"\
}\
]\
}',
{
status: 200,
headers: { 'Content-type': 'application/json' }
}
)
generateSuccessResponse()
);

testTransportHandler('event1');
clock.tick(INITIAL_SEND_TIME_DELAY_MS);
expect(fetchStub).to.have.been.calledOnce;
});

it('sends up to the maximum event limit in one request', async () => {
// Arrange
const setting = SettingsService.getInstance();
const flTransportFullUrl =
setting.flTransportEndpointUrl + '?key=' + setting.transportKey;

// Stub fetch to return a successful response from fl for logRequests.
const response = generateSuccessResponse();
stub(response, 'json').resolves(JSON.parse(generateSuccessResponseBody()));
fetchStub.resolves(response);

// Act
// Generate 1020 events, which should be dispatched in two batches (1000 events and 20 events).
for (let i = 0; i < 1020; i++) {
testTransportHandler('event' + i);
}
// Wait for first and second event dispatch to happen.
clock.tick(INITIAL_SEND_TIME_DELAY_MS);
// This is to resolve the floating promise chain in transport service.
await Promise.resolve().then().then().then();
clock.tick(DEFAULT_SEND_INTERVAL_MS);

// Assert
// Expects the first logRequest, which contains the first 1000 events.
// request_time_ms is 5501: start timestamp 1 + INITIAL_SEND_TIME_DELAY_MS (5500).
const firstLogRequest = generateLogRequest('5501');
for (let i = 0; i < MAX_EVENT_COUNT_PER_REQUEST; i++) {
firstLogRequest['log_event'].push({
'source_extension_json_proto3': 'event' + i,
'event_time_ms': '1'
});
}
expect(fetchStub).which.to.have.been.calledWith(flTransportFullUrl, {
method: 'POST',
body: JSON.stringify(firstLogRequest)
});
// Expects the second logRequest, which contains the remaining 20 events.
// request_time_ms is 15501: 5501 + DEFAULT_SEND_INTERVAL_MS (10000).
const secondLogRequest = generateLogRequest('15501');
for (let i = 0; i < 20; i++) {
secondLogRequest['log_event'].push({
'source_extension_json_proto3':
'event' + (MAX_EVENT_COUNT_PER_REQUEST + i),
'event_time_ms': '1'
});
}
expect(fetchStub).calledWith(flTransportFullUrl, {
method: 'POST',
body: JSON.stringify(secondLogRequest)
});
});

function generateLogRequest(requestTimeMs: string): any {
return {
'request_time_ms': requestTimeMs,
'client_info': {
'client_type': 1,
'js_client_info': {}
},
'log_source': 462,
'log_event': [] as any
};
}

function generateSuccessResponse(): Response {
return new Response(generateSuccessResponseBody(), {
status: 200,
headers: { 'Content-type': 'application/json' }
});
}

function generateSuccessResponseBody(): string {
return (
'{\
"nextRequestWaitMillis": "' +
TRANSPORT_DELAY_INTERVAL +
'",\
"logResponseDetails": [\
{\
"responseAction": "DELETE_REQUEST"\
}\
]\
}'
);
}
});
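
A note on the timing in the new test: sinon fake timers fire the scheduled dispatch synchronously on clock.tick(), but the fetch/response handling inside the service is promise-based, so the test flushes the microtask queue (await Promise.resolve().then().then().then()) before advancing the clock again. Below is a standalone, hedged sketch of that pattern, assuming mocha globals and plain chai; flushPromises and the setTimeout callback are illustrative, not SDK code.

import { expect } from 'chai';
import { stub, useFakeTimers, SinonFakeTimers } from 'sinon';

describe('fake timers with a floating promise chain', () => {
  let clock: SinonFakeTimers;

  beforeEach(() => {
    clock = useFakeTimers();
  });

  afterEach(() => {
    clock.restore();
  });

  // Awaiting a short chain of resolved promises lets queued .then() callbacks
  // run, mirroring `await Promise.resolve().then().then().then()` above.
  async function flushPromises(): Promise<void> {
    await Promise.resolve().then().then().then();
  }

  it('observes work scheduled behind a timer and a promise chain', async () => {
    const sideEffect = stub();

    // Illustrative stand-in for the service's timer-driven, promise-based dispatch.
    setTimeout(() => {
      Promise.resolve()
        .then(() => {
          /* e.g. await fetch(...) in the real service */
        })
        .then(() => sideEffect());
    }, 5500);

    clock.tick(5500); // fires the timer callback synchronously
    await flushPromises(); // lets the floating promise chain settle
    expect(sideEffect.calledOnce).to.be.true;
  });
});
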
8 changes: 5 additions & 3 deletions packages-exp/performance-exp/src/services/transport_service.ts
@@ -23,6 +23,7 @@ const DEFAULT_SEND_INTERVAL_MS = 10 * 1000;
const INITIAL_SEND_TIME_DELAY_MS = 5.5 * 1000;
// If the endpoint does not work, the call will be retried this many times.
const DEFAULT_REMAINING_TRIES = 3;
const MAX_EVENT_COUNT_PER_REQUEST = 1000;
let remainingTries = DEFAULT_REMAINING_TRIES;

interface LogResponseDetails {
@@ -90,9 +91,10 @@ function processQueue(timeOffset: number): void {
}

function dispatchQueueEvents(): void {
// Capture a snapshot of the queue and empty the "official queue".
const staged = [...queue];
queue = [];
// Extract events up to the maximum cap for a single logRequest from the top of the queue.
// The staged events are used for the current logRequest attempt; any remaining events are
// kept for the next attempt.
const staged = queue.splice(0, MAX_EVENT_COUNT_PER_REQUEST);

/* eslint-disable camelcase */
// We will pass the JSON serialized event to the backend.
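
For reference, the success responses stubbed in the tests mirror the shape this service consumes: nextRequestWaitMillis hints at the delay before the next dispatch, and a DELETE_REQUEST response action means the dispatched events were accepted and can be dropped. The following is only a hedged sketch of parsing that shape, not the SDK's actual handling code; parseTransportResponse and TransportResponse are illustrative names.

// Hedged sketch: parse the transport (fl) response shape used in the tests above.
interface LogResponseDetails {
  responseAction?: string;
}

interface TransportResponse {
  nextRequestWaitMillis?: string;
  logResponseDetails?: LogResponseDetails[];
}

async function parseTransportResponse(
  res: Response
): Promise<{ nextSendDelayMs?: number; dispatchSucceeded: boolean }> {
  const body: TransportResponse = await res.json();
  const nextSendDelayMs =
    body.nextRequestWaitMillis !== undefined
      ? Number(body.nextRequestWaitMillis)
      : undefined;
  // DELETE_REQUEST means the dispatched events were accepted by the backend.
  const dispatchSucceeded = (body.logResponseDetails || []).some(
    detail => detail.responseAction === 'DELETE_REQUEST'
  );
  return { nextSendDelayMs, dispatchSucceeded };
}

With the body built by generateSuccessResponseBody() above, this would yield nextSendDelayMs = 10000 and dispatchSucceeded = true.
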
108 changes: 89 additions & 19 deletions packages/performance/src/services/transport_service.test.ts
@@ -31,6 +31,8 @@ describe('Firebase Performance > transport_service', () => {
let fetchStub: SinonStub<[RequestInfo, RequestInit?], Promise<Response>>;
const INITIAL_SEND_TIME_DELAY_MS = 5.5 * 1000;
const DEFAULT_SEND_INTERVAL_MS = 10 * 1000;
const MAX_EVENT_COUNT_PER_REQUEST = 1000;
const TRANSPORT_DELAY_INTERVAL = 10000;
// Starts date at timestamp 1 instead of 0, otherwise it causes validation errors.
let clock: SinonFakeTimers;
const testTransportHandler = transportHandler((...args) => {
@@ -55,7 +57,7 @@ describe('Firebase Performance > transport_service', () => {
}).to.throw;
});

it('does not attempt to log an event to cc after INITIAL_SEND_TIME_DELAY_MS if queue is empty', () => {
it('does not attempt to log an event after INITIAL_SEND_TIME_DELAY_MS if queue is empty', () => {
fetchStub.resolves(
new Response('', {
status: 200,
@@ -67,7 +69,7 @@
expect(fetchStub).to.not.have.been.called;
});

it('attempts to log an event to cc after DEFAULT_SEND_INTERVAL_MS if queue not empty', () => {
it('attempts to log an event after DEFAULT_SEND_INTERVAL_MS if queue not empty', async () => {
fetchStub.resolves(
new Response('', {
status: 200,
@@ -82,32 +84,100 @@
});

it('successfully sends a message to transport', () => {
const transportDelayInterval = 30000;
const setting = SettingsService.getInstance();
const flTransportFullUrl =
setting.flTransportEndpointUrl + '?key=' + setting.transportKey;
fetchStub.withArgs(flTransportFullUrl, match.any).resolves(
// DELETE_REQUEST means event dispatch is successful.
new Response(
'{\
"nextRequestWaitMillis": "' +
transportDelayInterval +
'",\
"logResponseDetails": [\
{\
"responseAction": "DELETE_REQUEST"\
}\
]\
}',
{
status: 200,
headers: { 'Content-type': 'application/json' }
}
)
generateSuccessResponse()
);

testTransportHandler('event1');
clock.tick(INITIAL_SEND_TIME_DELAY_MS);
expect(fetchStub).to.have.been.calledOnce;
});

it('sends up to the maximum event limit in one request', async () => {
// Arrange
const setting = SettingsService.getInstance();
const flTransportFullUrl =
setting.flTransportEndpointUrl + '?key=' + setting.transportKey;

// Stub fetch to return a successful response from fl for logRequests.
const response = generateSuccessResponse();
stub(response, 'json').resolves(JSON.parse(generateSuccessResponseBody()));
fetchStub.resolves(response);

// Act
// Generate 1020 events, which should be dispatched in two batches (1000 events and 20 events).
for (let i = 0; i < 1020; i++) {
testTransportHandler('event' + i);
}
// Wait for first and second event dispatch to happen.
clock.tick(INITIAL_SEND_TIME_DELAY_MS);
// This is to resolve the floating promise chain in transport service.
await Promise.resolve().then().then().then();
clock.tick(DEFAULT_SEND_INTERVAL_MS);

// Assert
// Expects the first logRequest, which contains the first 1000 events.
// request_time_ms is 5501: start timestamp 1 + INITIAL_SEND_TIME_DELAY_MS (5500).
const firstLogRequest = generateLogRequest('5501');
for (let i = 0; i < MAX_EVENT_COUNT_PER_REQUEST; i++) {
firstLogRequest['log_event'].push({
'source_extension_json_proto3': 'event' + i,
'event_time_ms': '1'
});
}
expect(fetchStub).which.to.have.been.calledWith(flTransportFullUrl, {
method: 'POST',
body: JSON.stringify(firstLogRequest)
});
// Expects the second logRequest, which contains the remaining 20 events.
// request_time_ms is 15501: 5501 + DEFAULT_SEND_INTERVAL_MS (10000).
const secondLogRequest = generateLogRequest('15501');
for (let i = 0; i < 20; i++) {
secondLogRequest['log_event'].push({
'source_extension_json_proto3':
'event' + (MAX_EVENT_COUNT_PER_REQUEST + i),
'event_time_ms': '1'
});
}
expect(fetchStub).calledWith(flTransportFullUrl, {
method: 'POST',
body: JSON.stringify(secondLogRequest)
});
});

function generateLogRequest(requestTimeMs: string): any {
return {
'request_time_ms': requestTimeMs,
'client_info': {
'client_type': 1,
'js_client_info': {}
},
'log_source': 462,
'log_event': [] as any
};
}

function generateSuccessResponse(): Response {
return new Response(generateSuccessResponseBody(), {
status: 200,
headers: { 'Content-type': 'application/json' }
});
}

function generateSuccessResponseBody(): string {
return (
'{\
"nextRequestWaitMillis": "' +
TRANSPORT_DELAY_INTERVAL +
'",\
"logResponseDetails": [\
{\
"responseAction": "DELETE_REQUEST"\
}\
]\
}'
);
}
});
8 changes: 5 additions & 3 deletions packages/performance/src/services/transport_service.ts
@@ -23,6 +23,7 @@ const DEFAULT_SEND_INTERVAL_MS = 10 * 1000;
const INITIAL_SEND_TIME_DELAY_MS = 5.5 * 1000;
// If the endpoint does not work, the call will be retried this many times.
const DEFAULT_REMAINING_TRIES = 3;
const MAX_EVENT_COUNT_PER_REQUEST = 1000;
let remainingTries = DEFAULT_REMAINING_TRIES;

interface LogResponseDetails {
@@ -90,9 +91,10 @@ function processQueue(timeOffset: number): void {
}

function dispatchQueueEvents(): void {
// Capture a snapshot of the queue and empty the "official queue".
const staged = [...queue];
queue = [];
// Extract events up to the maximum cap for a single logRequest from the top of the queue.
// The staged events are used for the current logRequest attempt; any remaining events are
// kept for the next attempt.
const staged = queue.splice(0, MAX_EVENT_COUNT_PER_REQUEST);

/* eslint-disable camelcase */
// We will pass the JSON serialized event to the backend.