Skip to content
This repository was archived by the owner on Feb 3, 2025. It is now read-only.

Commit 62c24e2

Browse files
author
DEKHTIARJonathan
committed
[Benchmarking-Py] - Release 2.0.1 - Change to perf_counter()
1 parent a40ca6a commit 62c24e2

File tree

7 files changed

+20
-15
lines changed

7 files changed

+20
-15
lines changed

tftrt/benchmarking-python/CHANGELOG.md

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,11 @@ Description of the change
4646

4747
<!-- YOU CAN EDIT FROM HERE -->
4848

49+
## [2.0.1] - 2022.09.30 - @DEKHTIARJonathan
50+
51+
- Change from `time.time()` to `time.perf_counter()` for better time
52+
measurement.
53+
4954
## [2.0.0] - 2022.08.04 - @DEKHTIARJonathan
5055

5156
- Fix for XLA FP16 actually not being applied due to `"min_graph_nodes": -1`

tftrt/benchmarking-python/benchmark_autotuner.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -32,9 +32,9 @@ def __init__(self, funcs, calls_per_func, skip_n_first):
3232
def _autotune(self, *arg, **kwargs):
3333
fn_id = self._call_counter // self._calls_per_func
3434
try:
35-
start_t = time.time()
35+
start_t = time.perf_counter()
3636
output = self._fns[fn_id](*arg, **kwargs)
37-
self._timings[fn_id].append(time.time() - start_t)
37+
self._timings[fn_id].append(time.perf_counter() - start_t)
3838
except IndexError:
3939
print() # visual spacing
4040
logging.debug("AutoTuning is over... Collecting timing statistics:")

tftrt/benchmarking-python/benchmark_info.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
1010
# The `__version__` number shall be updated everytime core benchmarking files
1111
# are updated.
1212
# Please update CHANGELOG.md with a description of what this version changed.
13-
__version__ = "2.0.0"
13+
__version__ = "2.0.1"
1414

1515

1616
def get_commit_id():

tftrt/benchmarking-python/benchmark_runner.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -589,9 +589,9 @@ def start_profiling():
589589

590590
with tracing_ctx('Input Dequeueing'):
591591
try:
592-
start_time = time.time()
592+
start_time = time.perf_counter()
593593
data_batch = dequeue_batch_fn()
594-
dequeue_times.append(time.time() - start_time)
594+
dequeue_times.append(time.perf_counter() - start_time)
595595
except (StopIteration, OutOfRangeError):
596596
logging.info("[Exiting] Reached end of dataset ...")
597597
break
@@ -600,14 +600,14 @@ def start_profiling():
600600
x, y = self.preprocess_model_inputs(data_batch)
601601

602602
with tracing_ctx('Inputs MemcpyHtoD'):
603-
start_time = time.time()
603+
start_time = time.perf_counter()
604604
x = force_data_on_gpu_fn(x)
605-
memcopy_times.append(time.time() - start_time)
605+
memcopy_times.append(time.perf_counter() - start_time)
606606

607607
with tracing_ctx('GPU Inference'):
608-
start_time = time.time()
608+
start_time = time.perf_counter()
609609
y_pred = infer_batch(x)
610-
iter_times.append(time.time() - start_time)
610+
iter_times.append(time.perf_counter() - start_time)
611611

612612
if not self._args.debug_performance:
613613
log_step(

tftrt/benchmarking-python/benchmark_utils.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -68,9 +68,9 @@ def timed_section(msg, activate=True, start_end_mode=True):
6868
if start_end_mode:
6969
logging.info(f"[START] {msg} ...")
7070

71-
start_time = time.time()
71+
start_time = time.perf_counter()
7272
yield
73-
total_time = time.time() - start_time
73+
total_time = time.perf_counter() - start_time
7474

7575
if start_end_mode:
7676
logging.info(f"[END] {msg} - Duration: {total_time:.1f}s")

tftrt/benchmarking-python/template/infer.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -137,9 +137,9 @@ def evaluate_model(self, predictions, expected, bypass_data_to_eval):
137137
# let's say transforming a list into a dict() or reverse
138138
batch = preprocess_model_inputs(batch)
139139

140-
start_t = time.time()
140+
start_t = time.perf_counter()
141141
outputs = model_fn(batch)
142-
print(f"Inference Time: {(time.time() - start_t)*1000:.1f}ms") # 0.001
142+
print(f"Inference Time: {(time.perf_counter() - start_t)*1000:.1f}ms") # 0.001
143143

144144
## post my outputs to "measure accuracy"
145145
## note: we skip that

tftrt/blog_posts/Leveraging TensorFlow-TensorRT integration for Low latency Inference/tf2_inference.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -132,10 +132,10 @@ def calibration_input_fn():
132132
for step in range(1, INFERENCE_STEPS + 1):
133133
if step % 100 == 0:
134134
print("Processing step: %04d ..." % step)
135-
start_t = time.time()
135+
start_t = time.perf_counter()
136136
probs = infer(features)[output_tensorname]
137137
inferred_class = tf.math.argmax(probs).numpy()
138-
step_time = time.time() - start_t
138+
step_time = time.perf_counter() - start_t
139139
if step >= WARMUP_STEPS:
140140
step_times.append(step_time)
141141
except tf.errors.OutOfRangeError:

0 commit comments

Comments (0)