@@ -183,21 +183,24 @@ def __init__(self, influxdb_client, write_options: WriteOptions = WriteOptions()
    def write(self, bucket: str, org: str = None,
              record: Union[
                  str, List['str'], Point, List['Point'], dict, List['dict'], bytes, List['bytes'], Observable] = None,
-              write_precision: WritePrecision = DEFAULT_WRITE_PRECISION) -> None:
+              write_precision: WritePrecision = DEFAULT_WRITE_PRECISION, data_frame_measurement_name: str = None,
+              data_frame_tag_columns: List['str'] = None) -> None:
        """
        Writes time-series data into influxdb.

        :param str org: specifies the destination organization for writes; take either the ID or Name interchangeably; if both orgID and org are specified, org takes precedence. (required)
        :param str bucket: specifies the destination bucket for writes (required)
        :param WritePrecision write_precision: specifies the precision for the unix timestamps within the body line-protocol
-        :param record: Points, line protocol, RxPY Observable to write
+        :param record: Points, line protocol, Pandas DataFrame, RxPY Observable to write
+        :param data_frame_measurement_name: name of the measurement for writing a Pandas DataFrame
+        :param data_frame_tag_columns: list of DataFrame columns which are tags; the remaining columns will be fields

        """
        if org is None:
            org = self._influxdb_client.org

-        if self._point_settings.defaultTags and record:
+        if self._point_settings.defaultTags and record is not None:
            for key, val in self._point_settings.defaultTags.items():
                if isinstance(record, dict):
                    record.get("tags")[key] = val
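For reference, a minimal usage sketch of the DataFrame support added in this hunk; the URL, token, org, bucket, measurement and column names below are placeholders, not part of this change:

    import pandas as pd

    from influxdb_client import InfluxDBClient

    # Timestamps come from the DataFrame index (DatetimeIndex or PeriodIndex).
    df = pd.DataFrame({'location': ['Prague', 'New York'], 'temperature': [25.3, 18.7]},
                      index=pd.to_datetime(['2019-11-11 10:00', '2019-11-11 11:00']))

    client = InfluxDBClient(url="http://localhost:9999", token="my-token", org="my-org")
    write_api = client.write_api()

    # 'location' is written as a tag; the remaining column ('temperature') becomes a field.
    write_api.write(bucket="my-bucket", org="my-org", record=df,
                    data_frame_measurement_name="weather",
                    data_frame_tag_columns=["location"])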
@@ -211,7 +214,9 @@ def write(self, bucket: str, org: str = None,
        if self._write_options.write_type is WriteType.batching:
            return self._write_batching(bucket, org, record, write_precision)

-        final_string = self._serialize(record, write_precision)
+        final_string = self._serialize(record, write_precision,
+                                       data_frame_measurement_name,
+                                       data_frame_tag_columns)

        _async_req = True if self._write_options.write_type == WriteType.asynchronous else False
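The branch above routes batching writes through _write_batching and serializes everything else immediately (asynchronously when so configured). A rough sketch of selecting these modes when creating the write API; the WriteOptions keyword arguments shown (batch_size, flush_interval) are assumptions about the client's batching options and may differ by version:

    from influxdb_client import InfluxDBClient
    from influxdb_client.client.write_api import WriteOptions, WriteType

    client = InfluxDBClient(url="http://localhost:9999", token="my-token", org="my-org")

    # Batching: records (including DataFrames) are buffered and flushed in the background.
    batching_api = client.write_api(write_options=WriteOptions(write_type=WriteType.batching,
                                                               batch_size=500,
                                                               flush_interval=1_000))

    # Synchronous: the record is serialized and posted in the calling thread.
    sync_api = client.write_api(write_options=WriteOptions(write_type=WriteType.synchronous))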
@@ -235,7 +240,7 @@ def __del__(self):
        self._disposable = None
        pass

-    def _serialize(self, record, write_precision) -> bytes:
+    def _serialize(self, record, write_precision, data_frame_measurement_name, data_frame_tag_columns) -> bytes:
        _result = b''
        if isinstance(record, bytes):
            _result = record
@@ -244,40 +249,103 @@ def _serialize(self, record, write_precision) -> bytes:
            _result = record.encode("utf-8")

        elif isinstance(record, Point):
-            _result = self._serialize(record.to_line_protocol(), write_precision=write_precision)
+            _result = self._serialize(record.to_line_protocol(), write_precision,
+                                       data_frame_measurement_name, data_frame_tag_columns)

        elif isinstance(record, dict):
            _result = self._serialize(Point.from_dict(record, write_precision=write_precision),
-                                      write_precision=write_precision)
+                                      write_precision,
+                                      data_frame_measurement_name, data_frame_tag_columns)
+        elif 'DataFrame' in type(record).__name__:
+            _result = self._serialize(self._data_frame_to_list_of_points(record, data_frame_measurement_name,
+                                                                         data_frame_tag_columns,
+                                                                         precision=write_precision),
+                                      write_precision,
+                                      data_frame_measurement_name, data_frame_tag_columns)
+
        elif isinstance(record, list):
-            _result = b'\n'.join([self._serialize(item, write_precision=write_precision) for item in record])
+            _result = b'\n'.join([self._serialize(item, write_precision,
+                                                  data_frame_measurement_name, data_frame_tag_columns) for item in record])

        return _result

-    def _write_batching(self, bucket, org, data, precision=DEFAULT_WRITE_PRECISION):
+    def _write_batching(self, bucket, org, data,
+                        data_frame_measurement_name, data_frame_tag_columns,
+                        precision=DEFAULT_WRITE_PRECISION):
        _key = _BatchItemKey(bucket, org, precision)
        if isinstance(data, bytes):
            self._subject.on_next(_BatchItem(key=_key, data=data))

        elif isinstance(data, str):
-            self._write_batching(bucket, org, data.encode("utf-8"), precision)
+            self._write_batching(bucket, org, data.encode("utf-8"),
+                                 data_frame_measurement_name, data_frame_tag_columns, precision)

        elif isinstance(data, Point):
-            self._write_batching(bucket, org, data.to_line_protocol(), precision)
+            self._write_batching(bucket, org, data.to_line_protocol(),
+                                 data_frame_measurement_name, data_frame_tag_columns, precision)

        elif isinstance(data, dict):
-            self._write_batching(bucket, org, Point.from_dict(data, write_precision=precision), precision)
+            self._write_batching(bucket, org, Point.from_dict(data, write_precision=precision),
+                                 data_frame_measurement_name, data_frame_tag_columns, precision)
+
+        elif 'DataFrame' in type(data).__name__:
+            self._write_batching(bucket, org, self._data_frame_to_list_of_points(data, data_frame_measurement_name,
+                                                                                 data_frame_tag_columns, precision),
+                                 data_frame_measurement_name, data_frame_tag_columns, precision)

        elif isinstance(data, list):
            for item in data:
-                self._write_batching(bucket, org, item, precision)
+                self._write_batching(bucket, org, item,
+                                     data_frame_measurement_name, data_frame_tag_columns, precision)

        elif isinstance(data, Observable):
-            data.subscribe(lambda it: self._write_batching(bucket, org, it, precision))
+            data.subscribe(lambda it: self._write_batching(bucket, org, it,
+                                                           data_frame_measurement_name, data_frame_tag_columns,
+                                                           precision))
        pass

        return None

+    def _data_frame_to_list_of_points(self, dataframe, data_frame_measurement_name, data_frame_tag_columns, precision='s'):
+        from ..extras import pd
+        if not isinstance(dataframe, pd.DataFrame):
+            raise TypeError('Must be DataFrame, but type was: {0}.'
+                            .format(type(dataframe)))
+        if not (isinstance(dataframe.index, pd.PeriodIndex) or
+                isinstance(dataframe.index, pd.DatetimeIndex)):
+            raise TypeError('Must be DataFrame with DatetimeIndex or \
+                            PeriodIndex.')
+
+        if isinstance(dataframe.index, pd.PeriodIndex):
+            dataframe.index = dataframe.index.to_timestamp()
+        else:
+            dataframe.index = pd.to_datetime(dataframe.index)
+
+        if dataframe.index.tzinfo is None:
+            dataframe.index = dataframe.index.tz_localize('UTC')
+
+        data = []
+
+        c = 0
+        for v in dataframe.values:
+            point = Point(measurement_name=data_frame_measurement_name)
+
+            count = 0
+            for f in v:
+                column = dataframe.columns[count]
+                if data_frame_tag_columns and column in data_frame_tag_columns:
+                    point.tag(column, f)
+                else:
+                    point.field(column, f)
+                count += 1
+
+            point.time(dataframe.index[c], precision)
+            c += 1
+
+            data.append(point)
+
+        return data
+
    def _http(self, batch_item: _BatchItem):

        logger.debug("Write time series data into InfluxDB: %s", batch_item)
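To make the conversion added above concrete, here is a self-contained sketch that mirrors the logic of _data_frame_to_list_of_points and prints the resulting line protocol; the helper name data_frame_to_points and the sample data are illustrative only, not part of the client:

    import pandas as pd

    from influxdb_client import Point, WritePrecision


    def data_frame_to_points(df, measurement_name, tag_columns=None, precision=WritePrecision.NS):
        """Convert a DataFrame with a DatetimeIndex/PeriodIndex into a list of Points."""
        if not isinstance(df.index, (pd.PeriodIndex, pd.DatetimeIndex)):
            raise TypeError('DataFrame must have a DatetimeIndex or PeriodIndex.')

        index = df.index.to_timestamp() if isinstance(df.index, pd.PeriodIndex) else pd.to_datetime(df.index)
        if index.tzinfo is None:
            index = index.tz_localize('UTC')  # naive timestamps are interpreted as UTC

        points = []
        for timestamp, row in zip(index, df.itertuples(index=False)):
            point = Point(measurement_name)
            for column, value in zip(df.columns, row):
                if tag_columns and column in tag_columns:
                    point.tag(column, value)    # listed columns become tags
                else:
                    point.field(column, value)  # all other columns become fields
            points.append(point.time(timestamp, precision))
        return points


    df = pd.DataFrame({'location': ['Prague', 'New York'], 'temperature': [25.3, 18.7]},
                      index=pd.to_datetime(['2019-11-11 10:00', '2019-11-11 11:00']))
    for p in data_frame_to_points(df, 'weather', tag_columns=['location']):
        print(p.to_line_protocol())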