From 744c5f5166958b2e90dafbf9eced5ae74ef8e2a9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joseph=20Hopfm=C3=BCller?=
Date: Sat, 16 Nov 2024 00:39:19 +0100
Subject: [PATCH] rename dir; add torch import test script

---
 .../_path_fix.py                            |  0
 .../generate_signal.py                      |  0
 src/single-core-regen/torch-import-test.py  | 68 +++++++++++++++++++
 3 files changed, 68 insertions(+)
 rename src/{single-core-regen => single-core-data-gen}/_path_fix.py (100%)
 rename src/{single-core-regen => single-core-data-gen}/generate_signal.py (100%)
 create mode 100644 src/single-core-regen/torch-import-test.py

diff --git a/src/single-core-regen/_path_fix.py b/src/single-core-data-gen/_path_fix.py
similarity index 100%
rename from src/single-core-regen/_path_fix.py
rename to src/single-core-data-gen/_path_fix.py
diff --git a/src/single-core-regen/generate_signal.py b/src/single-core-data-gen/generate_signal.py
similarity index 100%
rename from src/single-core-regen/generate_signal.py
rename to src/single-core-data-gen/generate_signal.py
diff --git a/src/single-core-regen/torch-import-test.py b/src/single-core-regen/torch-import-test.py
new file mode 100644
index 0000000..b010a17
--- /dev/null
+++ b/src/single-core-regen/torch-import-test.py
@@ -0,0 +1,68 @@
+import torch
+import time
+
+def print_torch_env():
+    print("Torch version: ", torch.__version__)
+    print("CUDA available: ", torch.cuda.is_available())
+    print("CUDA version: ", torch.version.cuda)
+    print("CUDNN version: ", torch.backends.cudnn.version())
+    print("Device count: ", torch.cuda.device_count())
+    print("Current device: ", torch.cuda.current_device())
+    print("Device name: ", torch.cuda.get_device_name(0))
+    print("Device capability: ", torch.cuda.get_device_capability(0))
+    print("Device memory: ", torch.cuda.get_device_properties(0).total_memory)
+
+def measure_runtime(func):
+    """
+    Measure the runtime of a function.
+
+    :param func: Function to measure
+    :type func: function
+    :return: Wrapped function with runtime measurement
+    :rtype: function
+    """
+    def wrapper(*args, **kwargs):
+        start_time = time.time()
+        result = func(*args, **kwargs)
+        end_time = time.time()
+        print(f"Runtime: {end_time - start_time:.6f} seconds")
+        return result, end_time - start_time
+    return wrapper
+
+@measure_runtime
+def tensor_addition(a, b):
+    """
+    Perform tensor addition.
+
+    :param a: First tensor
+    :type a: torch.Tensor
+    :param b: Second tensor
+    :type b: torch.Tensor
+    :return: Sum of tensors
+    :rtype: torch.Tensor
+    """
+    return a + b
+
+def runtime_test():
+    x = torch.rand(2**18, 2**10)
+    y = torch.rand(2**18, 2**10)
+
+    print("Tensor addition on CPU")
+    _, cpu_time = tensor_addition(x, y)
+
+    print()
+    print("Tensor addition on GPU")
+    if not torch.cuda.is_available():
+        print("CUDA is not available")
+        return
+
+    _, gpu_time = tensor_addition(x.cuda(), y.cuda())
+
+    print()
+    print(f"Speedup: {cpu_time / gpu_time *100:.2f}%")
+
+
+if __name__ == "__main__":
+    print_torch_env()
+    print()
+    runtime_test()
\ No newline at end of file
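
Note on print_torch_env() in torch-import-test.py: the per-device queries
(torch.cuda.current_device() and the get_device_* calls) raise on a machine
without a usable CUDA device, so the script only runs end to end on a GPU box.
A minimal sketch of a guarded variant; the name print_torch_env_safe and the
guarding structure are suggestions, not part of the patch:

import torch

def print_torch_env_safe():
    # Version/build information is safe to query on any machine.
    print("Torch version: ", torch.__version__)
    print("CUDA available: ", torch.cuda.is_available())
    print("CUDA version: ", torch.version.cuda)               # None on CPU-only builds
    print("CUDNN version: ", torch.backends.cudnn.version())  # None if cuDNN is absent
    print("Device count: ", torch.cuda.device_count())
    if torch.cuda.is_available():
        # Per-device queries are only valid once a CUDA device exists.
        print("Current device: ", torch.cuda.current_device())
        print("Device name: ", torch.cuda.get_device_name(0))
        print("Device capability: ", torch.cuda.get_device_capability(0))
        print("Device memory: ", torch.cuda.get_device_properties(0).total_memory)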
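
Note on the GPU half of runtime_test(): CUDA kernel launches are asynchronous,
so timing a single a + b with time.time() mostly measures the launch (plus
one-off CUDA initialization on the first call), which makes the printed
speedup figure unreliable. A hedged sketch of a synchronized measurement,
assuming the inputs already live on the GPU; timed_gpu_add, warmup and iters
are illustrative names, not from the patch:

import time
import torch

def timed_gpu_add(a, b, warmup=3, iters=10):
    # Warm-up iterations absorb CUDA context and allocator start-up cost.
    for _ in range(warmup):
        _ = a + b
    torch.cuda.synchronize()   # drain queued work before starting the clock
    start = time.perf_counter()
    for _ in range(iters):
        _ = a + b
    torch.cuda.synchronize()   # ensure the additions have actually finished
    return (time.perf_counter() - start) / iters

With a measurement like this, the comparison could also be reported as a plain
factor (cpu_time / gpu_time) rather than the percentage printed by the script.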