
Commit 2123f70

chore: updated examples and README.rst to use with statement

1 parent 5a05075 · commit 2123f70

12 files changed: +704 −756 lines
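The common thread in these diffs is resource handling: each example used to create an InfluxDBClient at module level and, at best, remember to call client.close() at the end. The rewrite moves that cleanup into with blocks. A minimal before/after sketch of the pattern, assuming only the placeholder url/token/org used throughout the examples:

from influxdb_client import InfluxDBClient

# Before: close() had to be called explicitly and was skipped whenever an
# exception escaped, unless the caller remembered a try/finally.
client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org")
try:
    query_api = client.query_api()
finally:
    client.close()

# After: the client is closed on any exit from the block, normal or not.
with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
    query_api = client.query_api()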

README.rst (+271 −284)

Large diffs are not rendered by default.

examples/buckets_management.py (+34 −35)

@@ -10,39 +10,38 @@
 url = "http://localhost:8086"
 token = "my-token"

-client = InfluxDBClient(url=url, token=token)
-buckets_api = client.buckets_api()
+with InfluxDBClient(url=url, token=token) as client:
+    buckets_api = client.buckets_api()
+
+    """
+    The Bucket API uses as a parameter the Organization ID. We have to retrieve ID by Organization API.
+    """
+    org_name = "my-org"
+    org = list(filter(lambda it: it.name == org_name, client.organizations_api().find_organizations()))[0]
+
+    """
+    Create Bucket with retention policy set to 3600 seconds and name "bucket-by-python"
+    """
+    print(f"------- Create -------\n")
+    retention_rules = BucketRetentionRules(type="expire", every_seconds=3600)
+    created_bucket = buckets_api.create_bucket(bucket_name="bucket-by-python",
+                                               retention_rules=retention_rules,
+                                               org_id=org.id)
+    print(created_bucket)
+
+    """
+    List all Buckets
+    """
+    print(f"\n------- List -------\n")
+    buckets = buckets_api.find_buckets().buckets
+    print("\n".join([f" ---\n ID: {bucket.id}\n Name: {bucket.name}\n Retention: {bucket.retention_rules}"
+                     for bucket in buckets]))
+    print("---")
+
+    """
+    Delete previously created bucket
+    """
+    print(f"------- Delete -------\n")
+    buckets_api.delete_bucket(created_bucket)
+    print(f" successfully deleted bucket: {created_bucket.name}")

-"""
-The Bucket API uses as a parameter the Organization ID. We have to retrieve ID by Organization API.
-"""
-org_name = "my-org"
-org = list(filter(lambda it: it.name == org_name, client.organizations_api().find_organizations()))[0]
-
-"""
-Create Bucket with retention policy set to 3600 seconds and name "bucket-by-python"
-"""
-print(f"------- Create -------\n")
-retention_rules = BucketRetentionRules(type="expire", every_seconds=3600)
-created_bucket = buckets_api.create_bucket(bucket_name="bucket-by-python",
-                                           retention_rules=retention_rules,
-                                           org_id=org.id)
-print(created_bucket)
-
-"""
-List all Buckets
-"""
-print(f"\n------- List -------\n")
-buckets = buckets_api.find_buckets().buckets
-print("\n".join([f" ---\n ID: {bucket.id}\n Name: {bucket.name}\n Retention: {bucket.retention_rules}"
-                 for bucket in buckets]))
-print("---")
-
-"""
-Delete previously created bucket
-"""
-print(f"------- Delete -------\n")
-buckets_api.delete_bucket(created_bucket)
-print(f" successfully deleted bucket: {created_bucket.name}")
-
-client.close()
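The rewrite works because the client doubles as a context manager. As a hypothetical illustration (not the library's actual source), the protocol behind the with statement looks roughly like this; __exit__ runs on every exit from the block, so close() can no longer be forgotten even when create_bucket or delete_bucket raises:

class ManagedClient:
    # Toy stand-in for a closable client such as InfluxDBClient.

    def __enter__(self):
        return self  # the object bound by `as client`

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()  # runs even if the block raised

    def close(self):
        print("resources released")


with ManagedClient() as client:
    pass  # an exception anywhere in this block still triggers close()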

examples/example.py (+34 −34)

@@ -6,37 +6,37 @@

 bucket = "my-bucket"

-client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org")
-
-write_api = client.write_api(write_options=SYNCHRONOUS)
-query_api = client.query_api()
-
-p = Point("my_measurement").tag("location", "Prague").field("temperature", 25.3).time(datetime.now(), WritePrecision.MS)
-
-# write using point structure
-write_api.write(bucket=bucket, record=p)
-
-line_protocol = p.to_line_protocol()
-print(line_protocol)
-
-# write using line protocol string
-write_api.write(bucket=bucket, record=line_protocol)
-
-# using Table structure
-tables = query_api.query('from(bucket:"my-bucket") |> range(start: -1m)')
-for table in tables:
-    print(table)
-    for record in table.records:
-        # process record
-        print(record.values)
-
-# using csv library
-csv_result = query_api.query_csv('from(bucket:"my-bucket") |> range(start: -10m)')
-val_count = 0
-for record in csv_result:
-    for cell in record:
-        val_count += 1
-print("val count: ", val_count)
-
-response = query_api.query_raw('from(bucket:"my-bucket") |> range(start: -10m)')
-print (codecs.decode(response.data))
+with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
+    query_api = client.query_api()
+
+    p = Point("my_measurement").tag("location", "Prague").field("temperature", 25.3).time(datetime.now(),
+                                                                                          WritePrecision.MS)
+    with client.write_api(write_options=SYNCHRONOUS) as write_api:
+
+        # write using point structure
+        write_api.write(bucket=bucket, record=p)
+
+        line_protocol = p.to_line_protocol()
+        print(line_protocol)
+
+        # write using line protocol string
+        write_api.write(bucket=bucket, record=line_protocol)
+
+    # using Table structure
+    tables = query_api.query('from(bucket:"my_measurement") |> range(start: -1m)')
+    for table in tables:
+        print(table)
+        for record in table.records:
+            # process record
+            print(record.values)
+
+    # using csv library
+    csv_result = query_api.query_csv('from(bucket:"my-bucket") |> range(start: -10m)')
+    val_count = 0
+    for record in csv_result:
+        for cell in record:
+            val_count += 1
+    print("val count: ", val_count)
+
+    response = query_api.query_raw('from(bucket:"my-bucket") |> range(start: -10m)')
+    print (codecs.decode(response.data))
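Note the nesting in the new version: the inner write_api block ends before the queries run, so the synchronous writes are completed and disposed of while the client itself stays open for query_api. A condensed sketch of that shape, reusing the same placeholder connection settings:

from influxdb_client import InfluxDBClient, Point
from influxdb_client.client.write_api import SYNCHRONOUS

with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
    with client.write_api(write_options=SYNCHRONOUS) as write_api:
        # writes belong inside the inner block
        write_api.write(bucket="my-bucket", record=Point("demo").field("value", 1))
    # write_api is released here; the client is still usable for queries
    tables = client.query_api().query('from(bucket:"my-bucket") |> range(start: -1m)')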

examples/import_data_set.py (+26 −32)

@@ -60,39 +60,33 @@ def parse_row(row: OrderedDict):
     .from_iterable(DictReader(open('vix-daily.csv', 'r'))) \
     .pipe(ops.map(lambda row: parse_row(row)))

-client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org", debug=True)
+with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org", debug=True) as client:

-"""
-Create client that writes data in batches with 50_000 items.
-"""
-write_api = client.write_api(write_options=WriteOptions(batch_size=50_000, flush_interval=10_000))
-
-"""
-Write data into InfluxDB
-"""
-write_api.write(bucket="my-bucket", record=data)
-write_api.close()
+    """
+    Create client that writes data in batches with 50_000 items.
+    """
+    with client.write_api(write_options=WriteOptions(batch_size=50_000, flush_interval=10_000)) as write_api:

-"""
-Querying max value of CBOE Volatility Index
-"""
-query = 'from(bucket:"my-bucket")' \
-        ' |> range(start: 0, stop: now())' \
-        ' |> filter(fn: (r) => r._measurement == "financial-analysis")' \
-        ' |> max()'
-result = client.query_api().query(query=query)
+        """
+        Write data into InfluxDB
+        """
+        write_api.write(bucket="my-bucket", record=data)

-"""
-Processing results
-"""
-print()
-print("=== results ===")
-print()
-for table in result:
-    for record in table.records:
-        print('max {0:5} = {1}'.format(record.get_field(), record.get_value()))
+    """
+    Querying max value of CBOE Volatility Index
+    """
+    query = 'from(bucket:"my-bucket")' \
+            ' |> range(start: 0, stop: now())' \
+            ' |> filter(fn: (r) => r._measurement == "financial-analysis")' \
+            ' |> max()'
+    result = client.query_api().query(query=query)

-"""
-Close client
-"""
-client.close()
+    """
+    Processing results
+    """
+    print()
+    print("=== results ===")
+    print()
+    for table in result:
+        for record in table.records:
+            print('max {0:5} = {1}'.format(record.get_field(), record.get_value()))
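The detail that matters in this diff is the batching write API: with batch_size=50_000, records accumulate in a buffer, and the old code had to call write_api.close() by hand to push out the final partial batch before querying. Exiting the inner with block now performs that flush. A small sketch of the same guarantee, assuming a reachable server and an existing "my-bucket":

from influxdb_client import InfluxDBClient, WriteOptions

with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
    with client.write_api(write_options=WriteOptions(batch_size=50_000,
                                                     flush_interval=10_000)) as write_api:
        for i in range(1_000):
            # far fewer records than batch_size, so they sit in the buffer...
            write_api.write(bucket="my-bucket", record=f"demo,run=sketch value={i}i")
    # ...until here: leaving the block flushed the buffered records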

examples/import_data_set_multiprocessing.py (+72 −71)

@@ -132,88 +132,89 @@ def init_counter(counter, progress, queue):
     queue_ = queue


-"""
-Create multiprocess shared environment
-"""
-queue_ = multiprocessing.Manager().Queue()
-counter_ = Value('i', 0)
-progress_ = Value('i', 0)
-startTime = datetime.now()
+if __name__ == "__main__":
+    """
+    Create multiprocess shared environment
+    """
+    queue_ = multiprocessing.Manager().Queue()
+    counter_ = Value('i', 0)
+    progress_ = Value('i', 0)
+    startTime = datetime.now()

-url = "https://s3.amazonaws.com/nyc-tlc/trip+data/fhv_tripdata_2019-01.csv"
-# url = "file:///Users/bednar/Developer/influxdata/influxdb-client-python/examples/fhv_tripdata_2019-01.csv"
+    url = "https://s3.amazonaws.com/nyc-tlc/trip+data/fhv_tripdata_2019-01.csv"
+    # url = "file:///Users/bednar/Developer/influxdata/influxdb-client-python/examples/fhv_tripdata_2019-01.csv"

-"""
-Open URL and for stream data
-"""
-response = urlopen(url)
-if response.headers:
-    content_length = response.headers['Content-length']
-io_wrapper = ProgressTextIOWrapper(response)
-io_wrapper.progress = progress_
+    """
+    Open URL and for stream data
+    """
+    response = urlopen(url)
+    if response.headers:
+        content_length = response.headers['Content-length']
+    io_wrapper = ProgressTextIOWrapper(response)
+    io_wrapper.progress = progress_

-"""
-Start writer as a new process
-"""
-writer = InfluxDBWriter(queue_)
-writer.start()
+    """
+    Start writer as a new process
+    """
+    writer = InfluxDBWriter(queue_)
+    writer.start()

-"""
-Create process pool for parallel encoding into LineProtocol
-"""
-cpu_count = multiprocessing.cpu_count()
-with concurrent.futures.ProcessPoolExecutor(cpu_count, initializer=init_counter,
-                                            initargs=(counter_, progress_, queue_)) as executor:
-    """
-    Converts incoming HTTP stream into sequence of LineProtocol
-    """
-    data = rx \
-        .from_iterable(DictReader(io_wrapper)) \
-        .pipe(ops.buffer_with_count(10_000),
-              # Parse 10_000 rows into LineProtocol on subprocess
-              ops.flat_map(lambda rows: executor.submit(parse_rows, rows, content_length)))
-
-    """
-    Write data into InfluxDB
-    """
-    data.subscribe(on_next=lambda x: None, on_error=lambda ex: print(f'Unexpected error: {ex}'))
+    """
+    Create process pool for parallel encoding into LineProtocol
+    """
+    cpu_count = multiprocessing.cpu_count()
+    with concurrent.futures.ProcessPoolExecutor(cpu_count, initializer=init_counter,
+                                                initargs=(counter_, progress_, queue_)) as executor:
+        """
+        Converts incoming HTTP stream into sequence of LineProtocol
+        """
+        data = rx \
+            .from_iterable(DictReader(io_wrapper)) \
+            .pipe(ops.buffer_with_count(10_000),
+                  # Parse 10_000 rows into LineProtocol on subprocess
+                  ops.flat_map(lambda rows: executor.submit(parse_rows, rows, content_length)))
+
+        """
+        Write data into InfluxDB
+        """
+        data.subscribe(on_next=lambda x: None, on_error=lambda ex: print(f'Unexpected error: {ex}'))

-"""
-Terminate Writer
-"""
-queue_.put(None)
-queue_.join()
+    """
+    Terminate Writer
+    """
+    queue_.put(None)
+    queue_.join()

-print()
-print(f'Import finished in: {datetime.now() - startTime}')
-print()
+    print()
+    print(f'Import finished in: {datetime.now() - startTime}')
+    print()

-"""
-Querying 10 pickups from dispatching 'B00008'
-"""
-query = 'from(bucket:"my-bucket")' \
-        '|> range(start: 2019-01-01T00:00:00Z, stop: now()) ' \
-        '|> filter(fn: (r) => r._measurement == "taxi-trip-data")' \
-        '|> filter(fn: (r) => r.dispatching_base_num == "B00008")' \
-        '|> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' \
-        '|> rename(columns: {_time: "pickup_datetime"})' \
-        '|> drop(columns: ["_start", "_stop"])|> limit(n:10, offset: 0)'
+    """
+    Querying 10 pickups from dispatching 'B00008'
+    """
+    query = 'from(bucket:"my-bucket")' \
+            '|> range(start: 2019-01-01T00:00:00Z, stop: now()) ' \
+            '|> filter(fn: (r) => r._measurement == "taxi-trip-data")' \
+            '|> filter(fn: (r) => r.dispatching_base_num == "B00008")' \
+            '|> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' \
+            '|> rename(columns: {_time: "pickup_datetime"})' \
+            '|> drop(columns: ["_start", "_stop"])|> limit(n:10, offset: 0)'

-client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org", debug=False)
-result = client.query_api().query(query=query)
+    client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org", debug=False)
+    result = client.query_api().query(query=query)

-"""
-Processing results
-"""
-print()
-print("=== Querying 10 pickups from dispatching 'B00008' ===")
-print()
-for table in result:
-    for record in table.records:
-        print(
-            f'Dispatching: {record["dispatching_base_num"]} pickup: {record["pickup_datetime"]} dropoff: {record["dropoff_datetime"]}')
+    """
+    Processing results
+    """
+    print()
+    print("=== Querying 10 pickups from dispatching 'B00008' ===")
+    print()
+    for table in result:
+        for record in table.records:
+            print(
+                f'Dispatching: {record["dispatching_base_num"]} pickup: {record["pickup_datetime"]} dropoff: {record["dropoff_datetime"]}')

-"""
-Close client
-"""
-client.close()
+    """
+    Close client
+    """
+    client.close()
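Beyond the with-statement theme, this file also gains an if __name__ == "__main__": guard, which is what actually matters for multiprocessing: under the "spawn" start method (the default on Windows and, since Python 3.8, macOS) every worker re-imports the main module, so unguarded top-level code would run once per worker. A self-contained sketch of the failure mode the guard prevents:

import multiprocessing


def square(n):
    return n * n


# Without this guard, spawn-based workers re-importing the module would try
# to create their own Pool and submit work again, instead of just defining
# square() for the parent to call.
if __name__ == "__main__":
    with multiprocessing.Pool(processes=2) as pool:
        print(pool.map(square, range(5)))  # only the parent executes this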
