@@ -183,21 +183,23 @@ def __init__(self, influxdb_client, write_options: WriteOptions = WriteOptions()
     def write(self, bucket: str, org: str = None,
               record: Union[
                   str, List['str'], Point, List['Point'], dict, List['dict'], bytes, List['bytes'], Observable] = None,
-              write_precision: WritePrecision = DEFAULT_WRITE_PRECISION) -> None:
+              write_precision: WritePrecision = DEFAULT_WRITE_PRECISION, **kwargs) -> None:
         """
         Writes time-series data into influxdb.

         :param str org: specifies the destination organization for writes; take either the ID or Name interchangeably; if both orgID and org are specified, org takes precedence. (required)
         :param str bucket: specifies the destination bucket for writes (required)
         :param WritePrecision write_precision: specifies the precision for the unix timestamps within the body line-protocol
-        :param record: Points, line protocol, RxPY Observable to write
+        :param record: Points, line protocol, Pandas DataFrame, RxPY Observable to write
+        :param data_frame_measurement_name: name of the measurement for writing a Pandas DataFrame
+        :param data_frame_tag_columns: list of DataFrame columns which are tags; the remaining columns will be fields

         """

         if org is None:
             org = self._influxdb_client.org

-        if self._point_settings.defaultTags and record:
+        if self._point_settings.defaultTags and record is not None:
             for key, val in self._point_settings.defaultTags.items():
                 if isinstance(record, dict):
                     record.get("tags")[key] = val
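
For orientation, here is a minimal sketch of how the new DataFrame parameters are meant to be used through the public write() call; the connection details, bucket, measurement and column names are illustrative assumptions, not part of this commit:

import pandas as pd
from influxdb_client import InfluxDBClient
from influxdb_client.client.write_api import SYNCHRONOUS

# hypothetical connection details, for illustration only
client = InfluxDBClient(url="http://localhost:9999", token="my-token", org="my-org")
write_api = client.write_api(write_options=SYNCHRONOUS)

data_frame = pd.DataFrame(
    data=[["coyote_creek", 1.0], ["coyote_creek", 2.0]],
    index=[pd.Timestamp("2020-04-05 00:00:00"), pd.Timestamp("2020-04-05 01:00:00")],
    columns=["location", "water_level"],
)

# data_frame_measurement_name is required for DataFrame records;
# columns listed in data_frame_tag_columns become tags, all remaining columns become fields
write_api.write(
    bucket="my-bucket",
    record=data_frame,
    data_frame_measurement_name="h2o_feet",
    data_frame_tag_columns=["location"],
)
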
@@ -209,9 +211,10 @@ def write(self, bucket: str, org: str = None,
                             r.tag(key, val)

         if self._write_options.write_type is WriteType.batching:
-            return self._write_batching(bucket, org, record, write_precision)
+            return self._write_batching(bucket, org, record,
+                                        write_precision, **kwargs)

-        final_string = self._serialize(record, write_precision)
+        final_string = self._serialize(record, write_precision, **kwargs)

         _async_req = True if self._write_options.write_type == WriteType.asynchronous else False

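
The branch above is where the configured write options pick the path: with batching options the record and the new **kwargs are handed off to _write_batching, otherwise the record is serialized and posted right away. A rough sketch of selecting either behaviour; the option values and connection details are assumptions for illustration:

from influxdb_client import InfluxDBClient
from influxdb_client.client.write_api import SYNCHRONOUS, WriteOptions

client = InfluxDBClient(url="http://localhost:9999", token="my-token", org="my-org")

# batching: write() only enqueues the record, a background flush posts it later
batching_api = client.write_api(write_options=WriteOptions(batch_size=500, flush_interval=10_000))

# synchronous: write() serializes the record and posts it in the calling thread
sync_api = client.write_api(write_options=SYNCHRONOUS)
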
@@ -235,7 +238,7 @@ def __del__(self):
         self._disposable = None
         pass

-    def _serialize(self, record, write_precision) -> bytes:
+    def _serialize(self, record, write_precision, **kwargs) -> bytes:
         _result = b''
         if isinstance(record, bytes):
             _result = record
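
As the branches here and in the next hunk show, _serialize passes bytes through unchanged, encodes str records to UTF-8, and first reduces a Point to its line protocol. A small example of that intermediate form, with made-up measurement, tag and field names:

from influxdb_client import Point, WritePrecision

point = Point("h2o_feet") \
    .tag("location", "coyote_creek") \
    .field("water_level", 4.1) \
    .time(1586044800000000000, WritePrecision.NS)

# to_line_protocol() yields the string that _serialize() encodes to bytes, e.g.
# h2o_feet,location=coyote_creek water_level=4.1 1586044800000000000
print(point.to_line_protocol())
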
@@ -244,40 +247,96 @@ def _serialize(self, record, write_precision) -> bytes:
             _result = record.encode("utf-8")

         elif isinstance(record, Point):
-            _result = self._serialize(record.to_line_protocol(), write_precision=write_precision)
+            _result = self._serialize(record.to_line_protocol(), write_precision, **kwargs)

         elif isinstance(record, dict):
             _result = self._serialize(Point.from_dict(record, write_precision=write_precision),
-                                       write_precision=write_precision)
+                                       write_precision, **kwargs)
+        elif 'DataFrame' in type(record).__name__:
+            _result = self._serialize(self._data_frame_to_list_of_points(record,
+                                                                         precision=write_precision, **kwargs),
+                                      write_precision,
+                                      **kwargs)
+
         elif isinstance(record, list):
-            _result = b'\n'.join([self._serialize(item, write_precision=write_precision) for item in record])
+            _result = b'\n'.join([self._serialize(item, write_precision,
+                                                  **kwargs) for item in record])

         return _result

-    def _write_batching(self, bucket, org, data, precision=DEFAULT_WRITE_PRECISION):
+    def _write_batching(self, bucket, org, data,
+                        precision=DEFAULT_WRITE_PRECISION,
+                        **kwargs):
         _key = _BatchItemKey(bucket, org, precision)
         if isinstance(data, bytes):
             self._subject.on_next(_BatchItem(key=_key, data=data))

         elif isinstance(data, str):
-            self._write_batching(bucket, org, data.encode("utf-8"), precision)
+            self._write_batching(bucket, org, data.encode("utf-8"),
+                                 precision, **kwargs)

         elif isinstance(data, Point):
-            self._write_batching(bucket, org, data.to_line_protocol(), precision)
+            self._write_batching(bucket, org, data.to_line_protocol(),
+                                 precision, **kwargs)

         elif isinstance(data, dict):
-            self._write_batching(bucket, org, Point.from_dict(data, write_precision=precision), precision)
+            self._write_batching(bucket, org, Point.from_dict(data, write_precision=precision),
+                                 precision, **kwargs)
+
+        elif 'DataFrame' in type(data).__name__:
+            self._write_batching(bucket, org, self._data_frame_to_list_of_points(data, precision, **kwargs),
+                                 precision, **kwargs)

         elif isinstance(data, list):
             for item in data:
-                self._write_batching(bucket, org, item, precision)
+                self._write_batching(bucket, org, item, precision, **kwargs)

         elif isinstance(data, Observable):
-            data.subscribe(lambda it: self._write_batching(bucket, org, it, precision))
+            data.subscribe(lambda it: self._write_batching(bucket, org, it, precision, **kwargs))
             pass

         return None

+    def _data_frame_to_list_of_points(self, data_frame, precision, **kwargs):
+        from ..extras import pd
+        if not isinstance(data_frame, pd.DataFrame):
+            raise TypeError('Must be DataFrame, but type was: {0}.'
+                            .format(type(data_frame)))
+
+        if 'data_frame_measurement_name' not in kwargs:
+            raise TypeError('"data_frame_measurement_name" is a Required Argument')
+
+        if isinstance(data_frame.index, pd.PeriodIndex):
+            data_frame.index = data_frame.index.to_timestamp()
+        else:
+            data_frame.index = pd.to_datetime(data_frame.index)
+
+        if data_frame.index.tzinfo is None:
+            data_frame.index = data_frame.index.tz_localize('UTC')
+
+        data = []
+
+        for c, row in enumerate(data_frame.values):
+            point = Point(measurement_name=kwargs.get('data_frame_measurement_name'))
+
+            for count, value in enumerate(row):
+                column = data_frame.columns[count]
+                data_frame_tag_columns = kwargs.get('data_frame_tag_columns')
+                if data_frame_tag_columns and column in data_frame_tag_columns:
+                    point.tag(column, value)
+                else:
+                    point.field(column, value)
+
+            point.time(data_frame.index[c], precision)
+
+            if self._point_settings.defaultTags:
+                for key, val in self._point_settings.defaultTags.items():
+                    point.tag(key, val)
+
+            data.append(point)
+
+        return data
+
     def _http(self, batch_item: _BatchItem):

         logger.debug("Write time series data into InfluxDB: %s", batch_item)
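
Taken together, the new _data_frame_to_list_of_points turns each DataFrame row into one Point: the (UTC-localized) index supplies the timestamp, columns named in data_frame_tag_columns become tags, every other column becomes a field, and any defaultTags from the point settings are appended. A hedged end-to-end sketch of that mapping, with illustrative frame contents and names:

import pandas as pd
from influxdb_client import Point, WritePrecision

data_frame = pd.DataFrame(
    data={"location": ["coyote_creek"], "water_level": [4.1]},
    index=[pd.Timestamp("2020-04-05 00:00:00")],
)

# roughly what the conversion produces for this frame when called with
# data_frame_measurement_name="h2o_feet" and data_frame_tag_columns=["location"]
expected = Point("h2o_feet") \
    .tag("location", "coyote_creek") \
    .field("water_level", 4.1) \
    .time(pd.Timestamp("2020-04-05 00:00:00", tz="UTC"), WritePrecision.NS)

# prints something like:
# h2o_feet,location=coyote_creek water_level=4.1 1586044800000000000
print(expected.to_line_protocol())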