@@ -140,6 +140,7 @@ def get_sync_tp_workers_set(self):
         """We don't know the exact value of the threadcount set for the Python
         3.9 scenarios (as we'll start passing only None by default), and we
         need to get that information.
+
         Ref: concurrent.futures.thread.ThreadPoolExecutor.__init__._max_workers
         """
         return self._sync_call_tp._max_workers
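Note (not part of the diff): the Ref above points at a private attribute, _max_workers, which holds the thread count that ThreadPoolExecutor resolves when max_workers is None. A minimal standalone sketch of what that lookup observes:

    # Illustration only; _max_workers is a private CPython attribute, so this
    # mirrors what get_sync_tp_workers_set() reads from self._sync_call_tp,
    # not a public API.
    import concurrent.futures

    executor = concurrent.futures.ThreadPoolExecutor(max_workers=None)
    print(executor._max_workers)  # e.g. min(32, cpu_count + 4) on CPython 3.8+
    executor.shutdown(wait=False)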
@@ -193,19 +194,19 @@ async def dispatch_forever(self):  # sourcery skip: swap-if-expression

             root_logger.setLevel(log_level)
             root_logger.addHandler(logging_handler)
-            logger.info("Switched to gRPC logging.")
+            logger.info('Switched to gRPC logging.')
             logging_handler.flush()

             try:
                 await forever
             finally:
-                logger.warning("Detaching gRPC logging due to exception.")
+                logger.warning('Detaching gRPC logging due to exception.')
                 logging_handler.flush()
                 root_logger.removeHandler(logging_handler)

                 # Reenable console logging when there's an exception
                 enable_console_logging()
-                logger.warning("Switched to console logging due to exception.")
+                logger.warning('Switched to console logging due to exception.')
         finally:
             DispatcherMeta.__current_dispatcher__ = None

@@ -235,12 +236,12 @@ def on_logging(self, record: logging.LogRecord,
         elif record.levelno >= logging.DEBUG:
             log_level = protos.RpcLog.Debug
         else:
-            log_level = getattr(protos.RpcLog, "None")
+            log_level = getattr(protos.RpcLog, 'None')

         if is_system_log_category(record.name):
-            log_category = protos.RpcLog.RpcLogCategory.Value("System")
+            log_category = protos.RpcLog.RpcLogCategory.Value('System')
         else:  # customers using logging will yield 'root' in record.name
-            log_category = protos.RpcLog.RpcLogCategory.Value("User")
+            log_category = protos.RpcLog.RpcLogCategory.Value('User')

         log = dict(
             level=log_level,
@@ -251,7 +252,7 @@ def on_logging(self, record: logging.LogRecord,

         invocation_id = get_current_invocation_id()
         if invocation_id is not None:
-            log["invocation_id"] = invocation_id
+            log['invocation_id'] = invocation_id

         self._grpc_resp_queue.put_nowait(
             protos.StreamingMessage(
@@ -270,21 +271,21 @@ def worker_id(self) -> str:
     @staticmethod
     def _serialize_exception(exc: Exception):
         try:
-            message = f"{type(exc).__name__}: {exc}"
+            message = f'{type(exc).__name__}: {exc}'
         except Exception:
             message = ('Unhandled exception in function. '
                        'Could not serialize original exception message.')

         try:
             stack_trace = marshall_exception_trace(exc)
         except Exception:
-            stack_trace = ""
+            stack_trace = ''

         return protos.RpcException(message=message, stack_trace=stack_trace)

     async def _dispatch_grpc_request(self, request):
-        content_type = request.WhichOneof("content")
-        request_handler = getattr(self, f"_handle__{content_type}", None)
+        content_type = request.WhichOneof('content')
+        request_handler = getattr(self, f'_handle__{content_type}', None)
         if request_handler is None:
             # Don't crash on unknown messages. Some of them can be ignored;
             # and if something goes really wrong the host can always just
@@ -356,16 +357,16 @@ def update_opentelemetry_status(self):
     async def _handle__worker_init_request(self, request):
         worker_init_request = request.worker_init_request
         config_manager.set_config(
-            os.path.join(worker_init_request.function_app_directory, "az-config.json")
+            os.path.join(worker_init_request.function_app_directory, 'az-config.json')
         )
         logger.info(
-            "Received WorkerInitRequest, "
-            "python version %s, "
-            "worker version %s, "
-            "request ID %s. "
-            "App Settings state: %s. "
-            "To enable debug level logging, please refer to "
-            "https://aka.ms/python-enable-debug-logging",
+            'Received WorkerInitRequest, '
+            'python version %s, '
+            'worker version %s, '
+            'request ID %s. '
+            'App Settings state: %s. '
+            'To enable debug level logging, please refer to '
+            'https://aka.ms/python-enable-debug-logging',
             sys.version,
             VERSION,
             self.request_id,
@@ -441,7 +442,7 @@ def load_function_metadata(self, function_app_directory, caller_info):
         """
         script_file_name = config_manager.get_app_setting(
             setting=PYTHON_SCRIPT_FILE_NAME,
-            default_value=f"{PYTHON_SCRIPT_FILE_NAME_DEFAULT}")
+            default_value=f'{PYTHON_SCRIPT_FILE_NAME_DEFAULT}')

         logger.debug(
             'Received load metadata request from %s, request ID %s, '
@@ -469,7 +470,7 @@ async def _handle__functions_metadata_request(self, request):
             script_file_name)

         logger.info(
-            "Received WorkerMetadataRequest, request ID %s, " "function_path: %s",
+            'Received WorkerMetadataRequest, request ID %s, ' 'function_path: %s',
             self.request_id,
             function_path,
         )
@@ -659,7 +660,7 @@ async def _handle__invocation_request(self, request):
             # for a customer's threads
             fi_context.thread_local_storage.invocation_id = invocation_id
             if fi.requires_context:
-                args["context"] = fi_context
+                args['context'] = fi_context

             if fi.output_types:
                 for name in fi.output_types:
@@ -767,7 +768,7 @@ async def _handle__function_environment_reload_request(self, request):
                 os.environ[var] = env_vars[var]
             config_manager.set_config(
                 os.path.join(
-                    func_env_reload_request.function_app_directory, "az-config.json"
+                    func_env_reload_request.function_app_directory, 'az-config.json'
                 )
             )

@@ -799,7 +800,8 @@ async def _handle__function_environment_reload_request(self, request):
             if config_manager.is_envvar_true(PYTHON_ENABLE_INIT_INDEXING):
                 try:
                     self.load_function_metadata(
-                        directory, caller_info="environment_reload_request")
+                        directory,
+                        caller_info="environment_reload_request")

                     if HttpV2Registry.http_v2_enabled():
                         capabilities[HTTP_URI] = \
@@ -819,7 +821,8 @@ async def _handle__function_environment_reload_request(self, request):
             success_response = protos.FunctionEnvironmentReloadResponse(
                 capabilities=capabilities,
                 worker_metadata=self.get_worker_metadata(),
-                result=protos.StatusResult(status=protos.StatusResult.Success))
+                result=protos.StatusResult(
+                    status=protos.StatusResult.Success))

             return protos.StreamingMessage(
                 request_id=self.request_id,
@@ -855,7 +858,7 @@ def index_functions(self, function_path: str, function_dir: str):
                 func_binding_logs = fx_bindings_logs.get(func)
                 for binding in func.get_bindings():
                     deferred_binding_info = func_binding_logs.get(
-                        binding.name) \
+                        binding.name)\
                         if func_binding_logs.get(binding.name) else ""
                     indexed_function_bindings_logs.append((
                         binding.type, binding.name, deferred_binding_info))