+2. Create a release version, following semantic versioning guidelines ([semver.org](https://semver.org/)), and tag it with the pattern: `v<major>.<minor>.<patch>-protofile` (example: `v1.1.0-protofile`)
+3. Merge `dev` to `master`
+
+## Consuming FunctionRPC.proto
+*Note: Update `<versionNumber>` in the commands below before running them.*
+
+## CSharp
+```
+set NUGET_PATH="%UserProfile%\.nuget\packages"
+set GRPC_TOOLS_PATH=%NUGET_PATH%\grpc.tools\<versionNumber>\tools\windows_x86
+set PROTO_PATH=.\azure-functions-language-worker-protobuf\src\proto
+set PROTO=.\azure-functions-language-worker-protobuf\src\proto\FunctionRpc.proto
+set PROTOBUF_TOOLS=%NUGET_PATH%\google.protobuf.tools\<versionNumber>\tools
+set MSGDIR=.\Messages
+
+if exist %MSGDIR% rmdir /s /q %MSGDIR%
+mkdir %MSGDIR%
+
+set OUTDIR=%MSGDIR%\DotNet
+mkdir %OUTDIR%
+%GRPC_TOOLS_PATH%\protoc.exe %PROTO% --csharp_out %OUTDIR% --grpc_out=%OUTDIR% --plugin=protoc-gen-grpc=%GRPC_TOOLS_PATH%\grpc_csharp_plugin.exe --proto_path=%PROTO_PATH% --proto_path=%PROTOBUF_TOOLS%
+```
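+
+If the NuGet packages have been restored, the script above emits the generated C# message classes and gRPC stubs into `Messages\DotNet`; exact output file names depend on the Grpc.Tools version.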
+## JavaScript
+In package.json, add the following commands to the build script to generate the .js and .ts files. Install and use the npm package `protobufjs`.
+
+Generate JavaScript files:
+```
+pbjs -t json-module -w commonjs -o azure-functions-language-worker-protobuf/src/rpc.js azure-functions-language-worker-protobuf/src/proto/FunctionRpc.proto
+```
+Generate TypeScript files:
+```
+pbjs -t static-module azure-functions-language-worker-protobuf/src/proto/FunctionRpc.proto -o azure-functions-language-worker-protobuf/src/rpc_static.js && pbts -o azure-functions-language-worker-protobuf/src/rpc.d.ts azure-functions-language-worker-protobuf/src/rpc_static.js
+```
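+For example, a `build:protos` entry (name illustrative) can chain the two commands above with `&&` so the JavaScript module and the TypeScript declarations are regenerated together.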
+
+## Java
+Maven plugin: [protobuf-maven-plugin](https://www.xolstice.org/protobuf-maven-plugin/).
+In pom.xml, add the following under the configuration for this plugin:
+`<protoSourceRoot>${basedir}/<path to this repository>/azure-functions-language-worker-protobuf/src/proto</protoSourceRoot>`
+
+## Python
+--TODO
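+
+Until this section is filled in, here is a minimal sketch of generating the Python stubs with `grpcio-tools` (already a dependency in `pyproject.toml`); the output package `proxy_worker/protos` and the use of `protoc.main` directly are assumptions, not the repository's actual build script:
+```
+from grpc_tools import protoc
+import pkg_resources
+
+# Root of the submodule containing the .proto sources
+PROTO_ROOT = "azure-functions-language-worker-protobuf/src/proto"
+# Well-known types (google/protobuf/*.proto) bundled with grpcio-tools
+GRPC_INCLUDE = pkg_resources.resource_filename("grpc_tools", "_proto")
+
+exit_code = protoc.main([
+    "protoc",
+    f"--proto_path={PROTO_ROOT}",
+    f"--proto_path={GRPC_INCLUDE}",
+    "--python_out=proxy_worker/protos",       # assumed output package
+    "--grpc_python_out=proxy_worker/protos",  # assumed output package
+    f"{PROTO_ROOT}/FunctionRpc.proto",
+    f"{PROTO_ROOT}/identity/ClaimsIdentityRpc.proto",
+    f"{PROTO_ROOT}/shared/NullableTypes.proto",
+])
+assert exit_code == 0, "protoc failed"
+```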
+
+## Contributing
+
+This project welcomes contributions and suggestions. Most contributions require you to agree to a
+Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
+the rights to use your contribution. For details, visit https://cla.microsoft.com.
+
+When you submit a pull request, a CLA-bot will automatically determine whether you need to provide
+a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions
+provided by the bot. You will only need to do this once across all repos using our CLA.
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
+contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
diff --git a/proxy_worker/protos/_src/src/proto/FunctionRpc.proto b/proxy_worker/protos/_src/src/proto/FunctionRpc.proto
new file mode 100644
index 000000000..f48bc7bbe
--- /dev/null
+++ b/proxy_worker/protos/_src/src/proto/FunctionRpc.proto
@@ -0,0 +1,730 @@
+syntax = "proto3";
+// protobuf vscode extension: https://marketplace.visualstudio.com/items?itemName=zxh404.vscode-proto3
+
+option java_multiple_files = true;
+option java_package = "com.microsoft.azure.functions.rpc.messages";
+option java_outer_classname = "FunctionProto";
+option csharp_namespace = "Microsoft.Azure.WebJobs.Script.Grpc.Messages";
+option go_package ="github.com/Azure/azure-functions-go-worker/internal/rpc";
+
+package AzureFunctionsRpcMessages;
+
+import "google/protobuf/duration.proto";
+import "identity/ClaimsIdentityRpc.proto";
+import "shared/NullableTypes.proto";
+
+// Interface exported by the server.
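+// All host<->worker communication is multiplexed over this single bidirectional stream.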
+service FunctionRpc {
+ rpc EventStream (stream StreamingMessage) returns (stream StreamingMessage) {}
+}
+
+message StreamingMessage {
+ // Used to identify message between host and worker
+ string request_id = 1;
+
+ // Payload of the message
+ oneof content {
+
+ // Worker initiates stream
+ StartStream start_stream = 20;
+
+ // Host sends capabilities/init data to worker
+ WorkerInitRequest worker_init_request = 17;
+ // Worker responds after initializing with its capabilities & status
+ WorkerInitResponse worker_init_response = 16;
+
+ // MESSAGE NOT USED
+ // Worker periodically sends empty heartbeat message to host
+ WorkerHeartbeat worker_heartbeat = 15;
+
+ // Host sends terminate message to worker.
+ // Worker terminates if it can, otherwise host terminates after a grace period
+ WorkerTerminate worker_terminate = 14;
+
+ // Host periodically sends status request to the worker
+ WorkerStatusRequest worker_status_request = 12;
+ WorkerStatusResponse worker_status_response = 13;
+
+ // On file change event, host sends notification to worker
+ FileChangeEventRequest file_change_event_request = 6;
+
+ // Worker requests a desired action (restart worker, reload function)
+ WorkerActionResponse worker_action_response = 7;
+
+ // Host sends required metadata to worker to load function
+ FunctionLoadRequest function_load_request = 8;
+ // Worker responds after loading with the load result
+ FunctionLoadResponse function_load_response = 9;
+
+ // Host requests a given invocation
+ InvocationRequest invocation_request = 4;
+
+ // Worker responds to a given invocation
+ InvocationResponse invocation_response = 5;
+
+ // Host sends cancel message to attempt to cancel an invocation.
+ // If an invocation is cancelled, the host will receive an invocation response with status cancelled.
+ InvocationCancel invocation_cancel = 21;
+
+ // Worker logs a message back to the host
+ RpcLog rpc_log = 2;
+
+ FunctionEnvironmentReloadRequest function_environment_reload_request = 25;
+
+ FunctionEnvironmentReloadResponse function_environment_reload_response = 26;
+
+ // Ask the worker to close any open shared memory resources for a given invocation
+ CloseSharedMemoryResourcesRequest close_shared_memory_resources_request = 27;
+ CloseSharedMemoryResourcesResponse close_shared_memory_resources_response = 28;
+
+ // Worker indexing message types
+ FunctionsMetadataRequest functions_metadata_request = 29;
+ FunctionMetadataResponse function_metadata_response = 30;
+
+ // Host sends required metadata to worker to load functions
+ FunctionLoadRequestCollection function_load_request_collection = 31;
+
+ // Host gets the list of function load responses
+ FunctionLoadResponseCollection function_load_response_collection = 32;
+
+ // Host sends required metadata to worker to warmup the worker
+ WorkerWarmupRequest worker_warmup_request = 33;
+
+ // Worker responds after warming up with the warmup result
+ WorkerWarmupResponse worker_warmup_response = 34;
+
+ }
+}
+
+// Process.Start required info
+// connection details
+// protocol type
+// protocol version
+
+// Worker sends the host information identifying itself
+message StartStream {
+ // id of the worker
+ string worker_id = 2;
+}
+
+// Host requests the worker to initialize itself
+message WorkerInitRequest {
+ // version of the host sending init request
+ string host_version = 1;
+
+ // A map of host supported features/capabilities
+ map<string, string> capabilities = 2;
+
+ // inform worker of supported categories and their levels
+ // i.e. Worker = Verbose, Function.MyFunc = None
+ map<string, string> log_categories = 3;
+
+ // Full path of worker.config.json location
+ string worker_directory = 4;
+
+ // base directory for function app
+ string function_app_directory = 5;
+}
+
+// Worker responds with the result of initializing itself
+message WorkerInitResponse {
+ // PROPERTY NOT USED
+ // TODO: Remove from protobuf during next breaking change release
+ string worker_version = 1;
+
+ // A map of worker supported features/capabilities
+ map<string, string> capabilities = 2;
+
+ // Status of the response
+ StatusResult result = 3;
+
+ // Worker metadata captured for telemetry purposes
+ WorkerMetadata worker_metadata = 4;
+}
+
+message WorkerMetadata {
+ // The runtime/stack name
+ string runtime_name = 1;
+
+ // The version of the runtime/stack
+ string runtime_version = 2;
+
+ // The version of the worker
+ string worker_version = 3;
+
+ // The worker bitness/architecture
+ string worker_bitness = 4;
+
+ // Optional additional custom properties
+ map<string, string> custom_properties = 5;
+}
+
+// Used by the host to determine success/failure/cancellation
+message StatusResult {
+ // Indicates Failure/Success/Cancelled
+ enum Status {
+ Failure = 0;
+ Success = 1;
+ Cancelled = 2;
+ }
+
+ // Status for the given result
+ Status status = 4;
+
+ // Specific message about the result
+ string result = 1;
+
+ // Exception message (if exists) for the status
+ RpcException exception = 2;
+
+ // Captured logs or relevant details can use the logs property
+ repeated RpcLog logs = 3;
+}
+
+// MESSAGE NOT USED
+// TODO: Remove from protobuf during next breaking change release
+message WorkerHeartbeat {}
+
+// Warning before killing the process after grace_period.
+// Worker self-terminates; no response is sent for this message.
+message WorkerTerminate {
+ google.protobuf.Duration grace_period = 1;
+}
+
+// Host notifies worker of file content change
+message FileChangeEventRequest {
+ // Types of File change operations (See link for more info: https://msdn.microsoft.com/en-us/library/t6xf43e0(v=vs.110).aspx)
+ enum Type {
+ Unknown = 0;
+ Created = 1;
+ Deleted = 2;
+ Changed = 4;
+ Renamed = 8;
+ All = 15;
+ }
+
+ // type for this event
+ Type type = 1;
+
+ // full file path for the file change notification
+ string full_path = 2;
+
+ // Name of the function affected
+ string name = 3;
+}
+
+// Indicates whether worker reloaded successfully or needs a restart
+message WorkerActionResponse {
+ // Indicates whether a restart is needed or the reload succeeded
+ enum Action {
+ Restart = 0;
+ Reload = 1;
+ }
+
+ // action for this response
+ Action action = 1;
+
+ // text reason for the response
+ string reason = 2;
+}
+
+// Used by the host to determine worker health
+message WorkerStatusRequest {
+}
+
+// Worker responds with status message
+// TODO: Add any worker relevant status to response
+message WorkerStatusResponse {
+}
+
+message FunctionEnvironmentReloadRequest {
+ // Environment variables from the current process
+ map<string, NullableString> environment_variables = 1;
+ // Current directory of function app
+ string function_app_directory = 2;
+}
+
+message FunctionEnvironmentReloadResponse {
+ // After specialization, worker sends capabilities & metadata.
+ // Worker metadata captured for telemetry purposes
+ WorkerMetadata worker_metadata = 1;
+
+ // A map of worker supported features/capabilities
+ map<string, string> capabilities = 2;
+
+ // Status of the response
+ StatusResult result = 3;
+}
+
+// Tell the out-of-proc worker to close any shared memory maps it allocated for given invocation
+message CloseSharedMemoryResourcesRequest {
+ repeated string map_names = 1;
+}
+
+// Response from the worker indicating which of the shared memory maps have been successfully closed and which have not been closed
+// The key (string) is the map name and the value (bool) is true if it was closed, false if not
+message CloseSharedMemoryResourcesResponse {
+ map<string, bool> close_map_results = 1;
+}
+
+// Host tells the worker to load a list of Functions
+message FunctionLoadRequestCollection {
+ repeated FunctionLoadRequest function_load_requests = 1;
+}
+
+// Host gets the list of function load responses
+message FunctionLoadResponseCollection {
+ repeated FunctionLoadResponse function_load_responses = 1;
+}
+
+// Load request of a single Function
+message FunctionLoadRequest {
+ // unique function identifier (avoid name collisions, facilitate reload case)
+ string function_id = 1;
+
+ // Metadata for the request
+ RpcFunctionMetadata metadata = 2;
+
+ // A flag indicating if managed dependency is enabled or not
+ bool managed_dependency_enabled = 3;
+}
+
+// Worker tells host result of reload
+message FunctionLoadResponse {
+ // unique function identifier
+ string function_id = 1;
+
+ // Result of load operation
+ StatusResult result = 2;
+ // TODO: return type expected?
+
+ // Indicates whether the managed dependency download succeeded
+ bool is_dependency_downloaded = 3;
+}
+
+// Information on how a Function should be loaded and its bindings
+message RpcFunctionMetadata {
+ // TODO: do we want the host's name - the language worker might do a better job of assignment than the host
+ string name = 4;
+
+ // base directory for the Function
+ string directory = 1;
+
+ // Script file specified
+ string script_file = 2;
+
+ // Entry point specified
+ string entry_point = 3;
+
+ // Bindings info
+ map<string, BindingInfo> bindings = 6;
+
+ // Is set to true for proxy
+ bool is_proxy = 7;
+
+ // Function indexing status
+ StatusResult status = 8;
+
+ // Function language
+ string language = 9;
+
+ // Raw binding info
+ repeated string raw_bindings = 10;
+
+ // unique function identifier (avoid name collisions, facilitate reload case)
+ string function_id = 13;
+
+ // A flag indicating if managed dependency is enabled or not
+ bool managed_dependency_enabled = 14;
+
+ // The optional function execution retry strategy to use on invocation failures.
+ RpcRetryOptions retry_options = 15;
+
+ // Properties for function metadata
+ // They're usually specific to a worker and largely passed along to the controller API for use
+ // outside the host
+ map<string, string> properties = 16;
+}
+
+// Host tells worker it is ready to receive metadata
+message FunctionsMetadataRequest {
+ // base directory for function app
+ string function_app_directory = 1;
+}
+
+// Worker sends function metadata back to host
+message FunctionMetadataResponse {
+ // list of function indexing responses
+ repeated RpcFunctionMetadata function_metadata_results = 1;
+
+ // status of overall metadata request
+ StatusResult result = 2;
+
+ // if set to true then host will perform indexing
+ bool use_default_metadata_indexing = 3;
+}
+
+// Host requests worker to invoke a Function
+message InvocationRequest {
+ // Unique id for each invocation
+ string invocation_id = 1;
+
+ // Unique id for each Function
+ string function_id = 2;
+
+ // Input bindings (include trigger)
+ repeated ParameterBinding input_data = 3;
+
+ // binding metadata from trigger
+ map<string, TypedData> trigger_metadata = 4;
+
+ // Populates activityId, tracestate and tags from host
+ RpcTraceContext trace_context = 5;
+
+ // Current retry context
+ RetryContext retry_context = 6;
+}
+
+// Host sends ActivityId, traceStateString and Tags from host
+message RpcTraceContext {
+ // This corresponds to Activity.Current?.Id
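+ // (W3C traceparent format, e.g. "00-<trace-id>-<parent-id>-<trace-flags>")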
+ string trace_parent = 1;
+
+ // This corresponds to Activity.Current?.TraceStateString
+ string trace_state = 2;
+
+ // This corresponds to Activity.Current?.Tags
+ map<string, string> attributes = 3;
+}
+
+// Host sends retry context for a function invocation
+message RetryContext {
+ // Current retry count
+ int32 retry_count = 1;
+
+ // Max retry count
+ int32 max_retry_count = 2;
+
+ // Exception that caused the retry
+ RpcException exception = 3;
+}
+
+// Host requests worker to cancel invocation
+message InvocationCancel {
+ // Unique id for invocation
+ string invocation_id = 2;
+
+ // PROPERTY NOT USED
+ google.protobuf.Duration grace_period = 1;
+}
+
+// Worker responds with status of Invocation
+message InvocationResponse {
+ // Unique id for invocation
+ string invocation_id = 1;
+
+ // Output binding data
+ repeated ParameterBinding output_data = 2;
+
+ // data returned from Function (for $return and triggers with return support)
+ TypedData return_value = 4;
+
+ // Status of the invocation (success/failure/canceled)
+ StatusResult result = 3;
+}
+
+message WorkerWarmupRequest {
+ // Full path of worker.config.json location
+ string worker_directory = 1;
+}
+
+message WorkerWarmupResponse {
+ StatusResult result = 1;
+}
+
+// Used to encapsulate data which could be a variety of types
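+// Exactly one field of the oneof below may be set per message; e.g. a JSON payload
+// is sent with only the json field populated.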
+message TypedData {
+ oneof data {
+ string string = 1;
+ string json = 2;
+ bytes bytes = 3;
+ bytes stream = 4;
+ RpcHttp http = 5;
+ sint64 int = 6;
+ double double = 7;
+ CollectionBytes collection_bytes = 8;
+ CollectionString collection_string = 9;
+ CollectionDouble collection_double = 10;
+ CollectionSInt64 collection_sint64 = 11;
+ ModelBindingData model_binding_data = 12;
+ CollectionModelBindingData collection_model_binding_data = 13;
+ }
+}
+
+// Specify which type of data is contained in the shared memory region being read
+enum RpcDataType {
+ unknown = 0;
+ string = 1;
+ json = 2;
+ bytes = 3;
+ stream = 4;
+ http = 5;
+ int = 6;
+ double = 7;
+ collection_bytes = 8;
+ collection_string = 9;
+ collection_double = 10;
+ collection_sint64 = 11;
+}
+
+// Used to provide metadata about shared memory region to read data from
+message RpcSharedMemory {
+ // Name of the shared memory map containing data
+ string name = 1;
+ // Offset in the shared memory map to start reading data from
+ int64 offset = 2;
+ // Number of bytes to read (starting from the offset)
+ int64 count = 3;
+ // Final type to which the read data (in bytes) is to be interpreted as
+ RpcDataType type = 4;
+}
+
+// Used to encapsulate collection string
+message CollectionString {
+ repeated string string = 1;
+}
+
+// Used to encapsulate collection bytes
+message CollectionBytes {
+ repeated bytes bytes = 1;
+}
+
+// Used to encapsulate collection double
+message CollectionDouble {
+ repeated double double = 1;
+}
+
+// Used to encapsulate collection sint64
+message CollectionSInt64 {
+ repeated sint64 sint64 = 1;
+}
+
+// Used to describe a given binding on invocation
+message ParameterBinding {
+ // Name for the binding
+ string name = 1;
+
+ oneof rpc_data {
+ // Data for the binding
+ TypedData data = 2;
+
+ // Metadata about the shared memory region to read data from
+ RpcSharedMemory rpc_shared_memory = 3;
+ }
+}
+
+// Used to describe a given binding on load
+message BindingInfo {
+ // Indicates whether it is an input or output binding (or a fancy inout binding)
+ enum Direction {
+ in = 0;
+ out = 1;
+ inout = 2;
+ }
+
+ // Indicates the type of the data for the binding
+ enum DataType {
+ undefined = 0;
+ string = 1;
+ binary = 2;
+ stream = 3;
+ }
+
+ // Type of binding (e.g. HttpTrigger)
+ string type = 2;
+
+ // Direction of the given binding
+ Direction direction = 3;
+
+ DataType data_type = 4;
+
+ // Properties for binding metadata
+ map<string, string> properties = 5;
+}
+
+// Used to send logs back to the Host
+message RpcLog {
+ // Matching ILogger semantics
+ // https://github.com/aspnet/Logging/blob/9506ccc3f3491488fe88010ef8b9eb64594abf95/src/Microsoft.Extensions.Logging/Logger.cs
+ // Level for the Log
+ enum Level {
+ Trace = 0;
+ Debug = 1;
+ Information = 2;
+ Warning = 3;
+ Error = 4;
+ Critical = 5;
+ None = 6;
+ }
+
+ // Category of the log. Defaults to User if not specified.
+ enum RpcLogCategory {
+ User = 0;
+ System = 1;
+ CustomMetric = 2;
+ }
+
+ // Unique id for invocation (if exists)
+ string invocation_id = 1;
+
+ // TODO: This should be an enum
+ // Category for the log (startup, load, invocation, etc.)
+ string category = 2;
+
+ // Level for the given log message
+ Level level = 3;
+
+ // Message for the given log
+ string message = 4;
+
+ // Id for the event associated with this log (if it exists)
+ string event_id = 5;
+
+ // Exception (if exists)
+ RpcException exception = 6;
+
+ // json serialized property bag
+ string properties = 7;
+
+ // Category of the log. Either user(default), system, or custom metric.
+ RpcLogCategory log_category = 8;
+
+ // strongly-typed (ish) property bag
+ map<string, TypedData> propertiesMap = 9;
+}
+
+// Encapsulates an Exception
+message RpcException {
+ // Source of the exception
+ string source = 3;
+
+ // Stack trace for the exception
+ string stack_trace = 1;
+
+ // Textual message describing the exception
+ string message = 2;
+
+ // Worker specifies whether exception is a user exception,
+ // for purpose of application insights logging. Defaults to false.
+ bool is_user_exception = 4;
+
+ // Type of exception. If it's a user exception, the type is passed along to app insights.
+ // Otherwise, it's ignored for now.
+ string type = 5;
+}
+
+// Http cookie type. Note that only name and value are used for Http requests
+message RpcHttpCookie {
+ // Enum that lets servers require that a cookie shouldn't be sent with cross-site requests
+ enum SameSite {
+ None = 0;
+ Lax = 1;
+ Strict = 2;
+ ExplicitNone = 3;
+ }
+
+ // Cookie name
+ string name = 1;
+
+ // Cookie value
+ string value = 2;
+
+ // Specifies allowed hosts to receive the cookie
+ NullableString domain = 3;
+
+ // Specifies URL path that must exist in the requested URL
+ NullableString path = 4;
+
+ // Sets the cookie to expire at a specific date instead of when the client closes.
+ // It is generally recommended that you use "Max-Age" over "Expires".
+ NullableTimestamp expires = 5;
+
+ // Sets the cookie to only be sent with an encrypted request
+ NullableBool secure = 6;
+
+ // Sets the cookie to be inaccessible to JavaScript's Document.cookie API
+ NullableBool http_only = 7;
+
+ // Allows servers to assert that a cookie ought not to be sent along with cross-site requests
+ SameSite same_site = 8;
+
+ // Number of seconds until the cookie expires. A zero or negative number will expire the cookie immediately.
+ NullableDouble max_age = 9;
+}
+
+// TODO - solidify this or remove it
+message RpcHttp {
+ string method = 1;
+ string url = 2;
+ map<string, string> headers = 3;
+ TypedData body = 4;
+ map<string, string> params = 10;
+ string status_code = 12;
+ map<string, string> query = 15;
+ bool enable_content_negotiation = 16;
+ TypedData rawBody = 17;
+ repeated RpcClaimsIdentity identities = 18;
+ repeated RpcHttpCookie cookies = 19;
+ map<string, NullableString> nullable_headers = 20;
+ map<string, NullableString> nullable_params = 21;
+ map<string, NullableString> nullable_query = 22;
+}
+
+// Message representing Microsoft.Azure.WebJobs.ParameterBindingData
+// Used for hydrating SDK-type bindings in out-of-proc workers
+message ModelBindingData
+{
+ // The version of the binding data content
+ string version = 1;
+
+ // The extension source of the binding data
+ string source = 2;
+
+ // The content type of the binding data content
+ string content_type = 3;
+
+ // The binding data content
+ bytes content = 4;
+}
+
+// Used to encapsulate collection model_binding_data
+message CollectionModelBindingData {
+ repeated ModelBindingData model_binding_data = 1;
+}
+
+// Retry policy which the worker sends the host when the worker indexes
+// a function.
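+// Example: fixed_delay with max_retry_count = 3 and delay_interval = 5s retries a
+// failed invocation up to three times, waiting five seconds between attempts.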
+message RpcRetryOptions
+{
+ // The retry strategy to use. Valid values are fixed delay or exponential backoff.
+ enum RetryStrategy
+ {
+ exponential_backoff = 0;
+ fixed_delay = 1;
+ }
+
+ // The maximum number of retries allowed per function execution.
+ // -1 means to retry indefinitely.
+ int32 max_retry_count = 2;
+
+ // The delay that's used between retries when you're using a fixed delay strategy.
+ google.protobuf.Duration delay_interval = 3;
+
+ // The minimum retry delay when you're using an exponential backoff strategy
+ google.protobuf.Duration minimum_interval = 4;
+
+ // The maximum retry delay when you're using an exponential backoff strategy
+ google.protobuf.Duration maximum_interval = 5;
+
+ RetryStrategy retry_strategy = 6;
+}
\ No newline at end of file
diff --git a/proxy_worker/protos/_src/src/proto/identity/ClaimsIdentityRpc.proto b/proxy_worker/protos/_src/src/proto/identity/ClaimsIdentityRpc.proto
new file mode 100644
index 000000000..c3945bb8a
--- /dev/null
+++ b/proxy_worker/protos/_src/src/proto/identity/ClaimsIdentityRpc.proto
@@ -0,0 +1,26 @@
+syntax = "proto3";
+// protobuf vscode extension: https://marketplace.visualstudio.com/items?itemName=zxh404.vscode-proto3
+
+option java_package = "com.microsoft.azure.functions.rpc.messages";
+
+import "shared/NullableTypes.proto";
+
+// Light-weight representation of a .NET System.Security.Claims.ClaimsIdentity object.
+// This is the same serialization as found in EasyAuth, and needs to be kept in sync with
+// its ClaimsIdentitySlim definition, as seen in the WebJobs extension:
+// https://github.com/Azure/azure-webjobs-sdk-extensions/blob/dev/src/WebJobs.Extensions.Http/ClaimsIdentitySlim.cs
+message RpcClaimsIdentity {
+ NullableString authentication_type = 1;
+ NullableString name_claim_type = 2;
+ NullableString role_claim_type = 3;
+ repeated RpcClaim claims = 4;
+}
+
+// Light-weight representation of a .NET System.Security.Claims.Claim object.
+// This is the same serialization as found in EasyAuth, and needs to be kept in sync with
+// its ClaimSlim definition, as seen in the WebJobs extension:
+// https://github.com/Azure/azure-webjobs-sdk-extensions/blob/dev/src/WebJobs.Extensions.Http/ClaimSlim.cs
+message RpcClaim {
+ string value = 1;
+ string type = 2;
+}
diff --git a/proxy_worker/protos/_src/src/proto/shared/NullableTypes.proto b/proxy_worker/protos/_src/src/proto/shared/NullableTypes.proto
new file mode 100644
index 000000000..4fb476502
--- /dev/null
+++ b/proxy_worker/protos/_src/src/proto/shared/NullableTypes.proto
@@ -0,0 +1,30 @@
+syntax = "proto3";
+// protobuf vscode extension: https://marketplace.visualstudio.com/items?itemName=zxh404.vscode-proto3
+
+option java_package = "com.microsoft.azure.functions.rpc.messages";
+
+import "google/protobuf/timestamp.proto";
+
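+// Each Nullable* message wraps a scalar in a single-field oneof: when the oneof is
+// unset the value represents null; when it is set (even to the type's default), a
+// value is present.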
+message NullableString {
+ oneof string {
+ string value = 1;
+ }
+}
+
+message NullableDouble {
+ oneof double {
+ double value = 1;
+ }
+}
+
+message NullableBool {
+ oneof bool {
+ bool value = 1;
+ }
+}
+
+message NullableTimestamp {
+ oneof timestamp {
+ google.protobuf.Timestamp value = 1;
+ }
+}
diff --git a/pyproject.toml b/pyproject.toml
index 109e1c34b..ce8a5063b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -26,15 +26,20 @@ classifiers = [
"Intended Audience :: Developers"
]
dependencies = [
- "azure-functions==1.23.0b3",
- "python-dateutil ~=2.9.0",
+ "azure-functions==1.23.0",
+ "python-dateutil~=2.9.0",
"protobuf~=3.19.3; python_version == '3.7'",
- "protobuf~=5.29.0; python_version >= '3.8'",
+ "protobuf~=4.25.3; python_version >= '3.8' and python_version < '3.13'",
+ "protobuf~=5.29.0; python_version >= '3.13'",
"grpcio-tools~=1.43.0; python_version == '3.7'",
- "grpcio-tools~=1.70.0; python_version >= '3.8'",
+ "grpcio-tools~=1.59.0; python_version >= '3.8' and python_version < '3.13'",
+ "grpcio-tools~=1.70.0; python_version >= '3.13'",
"grpcio~=1.43.0; python_version == '3.7'",
- "grpcio~=1.70.0; python_version >= '3.8'",
- "azurefunctions-extensions-base; python_version >= '3.8'"
+ "grpcio ~=1.59.0; python_version >= '3.8' and python_version < '3.13'",
+ "grpcio~=1.70.0; python_version >= '3.13'",
+ "azurefunctions-extensions-base; python_version >= '3.8'",
+ "test-worker==1.0.0a38; python_version >= '3.13'",
+ "test-worker-v1==1.0.0a11; python_version >= '3.13'"
]
[project.urls]
diff --git a/python/proxyV4/worker.py b/python/proxyV4/worker.py
index dce5d51e6..2f899f37e 100644
--- a/python/proxyV4/worker.py
+++ b/python/proxyV4/worker.py
@@ -2,7 +2,6 @@
import pathlib
import sys
-
PKGS_PATH = "/home/site/wwwroot/.python_packages"
PKGS = "lib/site-packages"
diff --git a/python/test/worker.py b/python/test/worker.py
index a15d160ed..95790083f 100644
--- a/python/test/worker.py
+++ b/python/test/worker.py
@@ -1,7 +1,6 @@
import sys
import os
-from azure_functions_worker import main
-from proxy_worker import start_worker
+
# Azure environment variables
AZURE_WEBJOBS_SCRIPT_ROOT = "AzureWebJobsScriptRoot"
@@ -16,7 +15,10 @@ def add_script_root_to_sys_path():
if __name__ == '__main__':
add_script_root_to_sys_path()
- if sys.version_info.minor >= 13:
- start_worker.start()
- else:
+ minor_version = sys.version_info[1]
+ if minor_version < 13:
+ from azure_functions_worker import main
main.main()
+ else:
+ from proxy_worker import start_worker
+ start_worker.start()
diff --git a/setup.cfg b/setup.cfg
index 6f5a7fb98..5dde99ef5 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -18,3 +18,9 @@ ignore_errors = True
[mypy-azure_functions_worker._thirdparty.typing_inspect]
ignore_errors = True
+
+[mypy-proxy_worker.protos.*]
+ignore_errors = True
+
+[mypy-proxy_worker._thirdparty.typing_inspect]
+ignore_errors = True
\ No newline at end of file
diff --git a/tests/extension_tests/http_v2_tests/test_http_v2.py b/tests/extension_tests/http_v2_tests/test_http_v2.py
index 8c1d5b48e..514633743 100644
--- a/tests/extension_tests/http_v2_tests/test_http_v2.py
+++ b/tests/extension_tests/http_v2_tests/test_http_v2.py
@@ -8,11 +8,16 @@
import requests
from tests.utils import testutils
-from azure_functions_worker.utils.common import is_envvar_true
from tests.utils.constants import CONSUMPTION_DOCKER_TEST, DEDICATED_DOCKER_TEST
+# This app setting is only present for Python < 3.13
from azure_functions_worker.constants import PYTHON_ENABLE_INIT_INDEXING
+if sys.version_info.minor < 13:
+ from azure_functions_worker.utils.common import is_envvar_true
+else:
+ from proxy_worker.utils.common import is_envvar_true
+
REQUEST_TIMEOUT_SEC = 5
diff --git a/tests/unittests/test_code_quality.py b/tests/unittests/test_code_quality.py
index 54d1cc725..45f7bda47 100644
--- a/tests/unittests/test_code_quality.py
+++ b/tests/unittests/test_code_quality.py
@@ -17,7 +17,7 @@ def test_mypy(self):
try:
subprocess.run(
- [sys.executable, '-m', 'mypy', '-m', 'azure_functions_worker'],
+ [sys.executable, '-m', 'mypy', '-m', 'azure_functions_worker', 'proxy_worker'],
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
diff --git a/tests/utils/testutils.py b/tests/utils/testutils.py
index c04b134c5..360fca2e5 100644
--- a/tests/utils/testutils.py
+++ b/tests/utils/testutils.py
@@ -50,18 +50,23 @@
WebHostDedicated,
)
-from azure_functions_worker import dispatcher, protos
-from azure_functions_worker.bindings.shared_memory_data_transfer import (
- FileAccessorFactory,
-)
-from azure_functions_worker.bindings.shared_memory_data_transfer import (
- SharedMemoryConstants as consts,
-)
-from azure_functions_worker.constants import (
- FUNCTIONS_WORKER_SHARED_MEMORY_DATA_TRANSFER_ENABLED,
- UNIX_SHARED_MEMORY_DIRECTORIES,
-)
-from azure_functions_worker.utils.common import get_app_setting, is_envvar_true
+if sys.version_info.minor < 13:
+ from azure_functions_worker import dispatcher, protos
+ from azure_functions_worker.bindings.shared_memory_data_transfer import (
+ FileAccessorFactory,
+ )
+ from azure_functions_worker.bindings.shared_memory_data_transfer import (
+ SharedMemoryConstants as consts,
+ )
+ from azure_functions_worker.constants import (
+ FUNCTIONS_WORKER_SHARED_MEMORY_DATA_TRANSFER_ENABLED,
+ UNIX_SHARED_MEMORY_DIRECTORIES,
+ )
+ from azure_functions_worker.utils.common import get_app_setting, is_envvar_true
+else:
+ from proxy_worker import dispatcher, protos
+ from proxy_worker.utils.common import is_envvar_true
+ from proxy_worker.utils.app_settings import get_app_setting
TESTS_ROOT = PROJECT_ROOT / 'tests'
E2E_TESTS_FOLDER = pathlib.Path('endtoend')
@@ -320,125 +325,126 @@ def _run_test(self, test, *args, **kwargs):
if test_exception is not None:
raise test_exception
-
-class SharedMemoryTestCase(unittest.TestCase):
- """
- For tests involving shared memory data transfer usage.
- """
-
- def setUp(self):
- self.was_shmem_env_true = is_envvar_true(
- FUNCTIONS_WORKER_SHARED_MEMORY_DATA_TRANSFER_ENABLED)
- os.environ.update(
- {FUNCTIONS_WORKER_SHARED_MEMORY_DATA_TRANSFER_ENABLED: '1'})
-
- os_name = platform.system()
- if os_name == 'Darwin':
- # If an existing AppSetting is specified, save it so it can be
- # restored later
- self.was_shmem_dirs = get_app_setting(
- UNIX_SHARED_MEMORY_DIRECTORIES
- )
- self._setUpDarwin()
- elif os_name == 'Linux':
- self._setUpLinux()
- self.file_accessor = FileAccessorFactory.create_file_accessor()
-
- def tearDown(self):
- os_name = platform.system()
- if os_name == 'Darwin':
- self._tearDownDarwin()
- if self.was_shmem_dirs is not None:
- # If an AppSetting was set before the tests ran, restore it back
- os.environ.update(
- {UNIX_SHARED_MEMORY_DIRECTORIES: self.was_shmem_dirs})
- elif os_name == 'Linux':
- self._tearDownLinux()
-
- if not self.was_shmem_env_true:
- os.environ.update(
- {FUNCTIONS_WORKER_SHARED_MEMORY_DATA_TRANSFER_ENABLED: '0'})
-
- def get_new_mem_map_name(self):
- return str(uuid.uuid4())
-
- def get_random_bytes(self, num_bytes):
- return bytearray(random.getrandbits(8) for _ in range(num_bytes))
-
- def get_random_string(self, num_chars):
- return ''.join(random.choices(string.ascii_uppercase + string.digits,
- k=num_chars))
-
- def is_valid_uuid(self, uuid_to_test: str, version: int = 4) -> bool:
+# This is not supported in 3.13+
+if sys.version_info.minor < 13:
+ class SharedMemoryTestCase(unittest.TestCase):
"""
- Check if uuid_to_test is a valid UUID.
- Reference: https://stackoverflow.com/a/33245493/3132415
+ For tests involving shared memory data transfer usage.
"""
- try:
- uuid_obj = uuid.UUID(uuid_to_test, version=version)
- except ValueError:
- return False
- return str(uuid_obj) == uuid_to_test
- def _createSharedMemoryDirectories(self, directories):
- for temp_dir in directories:
- temp_dir_path = os.path.join(temp_dir, consts.UNIX_TEMP_DIR_SUFFIX)
- if not os.path.exists(temp_dir_path):
- os.makedirs(temp_dir_path)
+ def setUp(self):
+ self.was_shmem_env_true = is_envvar_true(
+ FUNCTIONS_WORKER_SHARED_MEMORY_DATA_TRANSFER_ENABLED)
+ os.environ.update(
+ {FUNCTIONS_WORKER_SHARED_MEMORY_DATA_TRANSFER_ENABLED: '1'})
+
+ os_name = platform.system()
+ if os_name == 'Darwin':
+ # If an existing AppSetting is specified, save it so it can be
+ # restored later
+ self.was_shmem_dirs = get_app_setting(
+ UNIX_SHARED_MEMORY_DIRECTORIES
+ )
+ self._setUpDarwin()
+ elif os_name == 'Linux':
+ self._setUpLinux()
+ self.file_accessor = FileAccessorFactory.create_file_accessor()
+
+ def tearDown(self):
+ os_name = platform.system()
+ if os_name == 'Darwin':
+ self._tearDownDarwin()
+ if self.was_shmem_dirs is not None:
+ # If an AppSetting was set before the tests ran, restore it back
+ os.environ.update(
+ {UNIX_SHARED_MEMORY_DIRECTORIES: self.was_shmem_dirs})
+ elif os_name == 'Linux':
+ self._tearDownLinux()
+
+ if not self.was_shmem_env_true:
+ os.environ.update(
+ {FUNCTIONS_WORKER_SHARED_MEMORY_DATA_TRANSFER_ENABLED: '0'})
- def _deleteSharedMemoryDirectories(self, directories):
- for temp_dir in directories:
- temp_dir_path = os.path.join(temp_dir, consts.UNIX_TEMP_DIR_SUFFIX)
- shutil.rmtree(temp_dir_path)
+ def get_new_mem_map_name(self):
+ return str(uuid.uuid4())
- def _setUpLinux(self):
- self._createSharedMemoryDirectories(consts.UNIX_TEMP_DIRS)
+ def get_random_bytes(self, num_bytes):
+ return bytearray(random.getrandbits(8) for _ in range(num_bytes))
- def _tearDownLinux(self):
- self._deleteSharedMemoryDirectories(consts.UNIX_TEMP_DIRS)
+ def get_random_string(self, num_chars):
+ return ''.join(random.choices(string.ascii_uppercase + string.digits,
+ k=num_chars))
- def _setUpDarwin(self):
- """
- Create a RAM disk on macOS.
- Ref: https://stackoverflow.com/a/2033417/3132415
- """
- size_in_mb = consts.MAX_BYTES_FOR_SHARED_MEM_TRANSFER / (1024 * 1024)
- size = 2048 * size_in_mb
- # The following command returns the name of the created disk
- cmd = ['hdiutil', 'attach', '-nomount', f'ram://{size}']
- result = subprocess.run(cmd, stdout=subprocess.PIPE)
- if result.returncode != 0:
- raise IOError(f'Cannot create ram disk with command: {cmd} - '
- f'{result.stdout} - {result.stderr}')
- disk_name = result.stdout.strip().decode()
- # We create a volume on the disk created above and mount it
- volume_name = 'shm'
- cmd = ['diskutil', 'eraseVolume', 'HFS+', volume_name, disk_name]
- result = subprocess.run(cmd, stdout=subprocess.PIPE)
- if result.returncode != 0:
- raise IOError(f'Cannot create volume with command: {cmd} - '
- f'{result.stdout} - {result.stderr}')
- directory = f'/Volumes/{volume_name}'
- self.created_directories = [directory]
- # Create directories in the volume for shared memory maps
- self._createSharedMemoryDirectories(self.created_directories)
- # Override the AppSetting for the duration of this test so the
- # FileAccessorUnix can use these directories for creating memory maps
- os.environ.update(
- {UNIX_SHARED_MEMORY_DIRECTORIES: ','.join(self.created_directories)}
- )
+ def is_valid_uuid(self, uuid_to_test: str, version: int = 4) -> bool:
+ """
+ Check if uuid_to_test is a valid UUID.
+ Reference: https://stackoverflow.com/a/33245493/3132415
+ """
+ try:
+ uuid_obj = uuid.UUID(uuid_to_test, version=version)
+ except ValueError:
+ return False
+ return str(uuid_obj) == uuid_to_test
+
+ def _createSharedMemoryDirectories(self, directories):
+ for temp_dir in directories:
+ temp_dir_path = os.path.join(temp_dir, consts.UNIX_TEMP_DIR_SUFFIX)
+ if not os.path.exists(temp_dir_path):
+ os.makedirs(temp_dir_path)
+
+ def _deleteSharedMemoryDirectories(self, directories):
+ for temp_dir in directories:
+ temp_dir_path = os.path.join(temp_dir, consts.UNIX_TEMP_DIR_SUFFIX)
+ shutil.rmtree(temp_dir_path)
+
+ def _setUpLinux(self):
+ self._createSharedMemoryDirectories(consts.UNIX_TEMP_DIRS)
+
+ def _tearDownLinux(self):
+ self._deleteSharedMemoryDirectories(consts.UNIX_TEMP_DIRS)
+
+ def _setUpDarwin(self):
+ """
+ Create a RAM disk on macOS.
+ Ref: https://stackoverflow.com/a/2033417/3132415
+ """
+ size_in_mb = consts.MAX_BYTES_FOR_SHARED_MEM_TRANSFER / (1024 * 1024)
+ size = 2048 * size_in_mb
+ # The following command returns the name of the created disk
+ cmd = ['hdiutil', 'attach', '-nomount', f'ram://{size}']
+ result = subprocess.run(cmd, stdout=subprocess.PIPE)
+ if result.returncode != 0:
+ raise IOError(f'Cannot create ram disk with command: {cmd} - '
+ f'{result.stdout} - {result.stderr}')
+ disk_name = result.stdout.strip().decode()
+ # We create a volume on the disk created above and mount it
+ volume_name = 'shm'
+ cmd = ['diskutil', 'eraseVolume', 'HFS+', volume_name, disk_name]
+ result = subprocess.run(cmd, stdout=subprocess.PIPE)
+ if result.returncode != 0:
+ raise IOError(f'Cannot create volume with command: {cmd} - '
+ f'{result.stdout} - {result.stderr}')
+ directory = f'/Volumes/{volume_name}'
+ self.created_directories = [directory]
+ # Create directories in the volume for shared memory maps
+ self._createSharedMemoryDirectories(self.created_directories)
+ # Override the AppSetting for the duration of this test so the
+ # FileAccessorUnix can use these directories for creating memory maps
+ os.environ.update(
+ {UNIX_SHARED_MEMORY_DIRECTORIES: ','.join(self.created_directories)}
+ )
- def _tearDownDarwin(self):
- # Delete the directories containing shared memory maps
- self._deleteSharedMemoryDirectories(self.created_directories)
- # Unmount the volume used for shared memory maps
- volume_name = 'shm'
- cmd = f"find /Volumes -type d -name '{volume_name}*' -print0 " \
- "| xargs -0 umount -f"
- result = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)
- if result.returncode != 0:
- raise IOError(f'Cannot delete volume with command: {cmd} - '
- f'{result.stdout} - {result.stderr}')
+ def _tearDownDarwin(self):
+ # Delete the directories containing shared memory maps
+ self._deleteSharedMemoryDirectories(self.created_directories)
+ # Unmount the volume used for shared memory maps
+ volume_name = 'shm'
+ cmd = f"find /Volumes -type d -name '{volume_name}*' -print0 " \
+ "| xargs -0 umount -f"
+ result = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)
+ if result.returncode != 0:
+ raise IOError(f'Cannot delete volume with command: {cmd} - '
+ f'{result.stdout} - {result.stderr}')
class _MockWebHostServicer(protos.FunctionRpcServicer):