Diffstat (limited to 'gnu/packages/patches/python-pytorch-without-kineto.patch')
-rw-r--r--  gnu/packages/patches/python-pytorch-without-kineto.patch  60
1 file changed, 60 insertions(+), 0 deletions(-)
diff --git a/gnu/packages/patches/python-pytorch-without-kineto.patch b/gnu/packages/patches/python-pytorch-without-kineto.patch
new file mode 100644
index 0000000000..f956316866
--- /dev/null
+++ b/gnu/packages/patches/python-pytorch-without-kineto.patch
@@ -0,0 +1,60 @@
+Even when building without Kineto, the <ActivityType.h> header is still
+included and the ActivityType type is used. This patch was copied from
+https://github.com/pytorch/pytorch/pull/111048.
+
+diff --git a/torch/csrc/profiler/kineto_shim.h b/torch/csrc/profiler/kineto_shim.h
+index e92cbf00..68985ab7 100644
+--- a/torch/csrc/profiler/kineto_shim.h
++++ b/torch/csrc/profiler/kineto_shim.h
+@@ -12,7 +12,51 @@
+ #undef USE_KINETO
+ #endif
+
++#ifdef USE_KINETO
+ #include <ActivityType.h>
++#else
++namespace libkineto {
++// copied from header
++/*
++ * Copyright (c) Meta Platforms, Inc. and affiliates.
++ * All rights reserved.
++ *
++ * This source code is licensed under the BSD-style license found in the
++ * LICENSE file in the root directory of this source tree.
++ */
++
++// Note : All activity types are not enabled by default. Please add them
++// at correct position in the enum
++enum class ActivityType {
++ // Activity types enabled by default
++ CPU_OP = 0, // cpu side ops
++ USER_ANNOTATION,
++ GPU_USER_ANNOTATION,
++ GPU_MEMCPY,
++ GPU_MEMSET,
++ CONCURRENT_KERNEL, // on-device kernels
++ EXTERNAL_CORRELATION,
++ CUDA_RUNTIME, // host side cuda runtime events
++ CUDA_DRIVER, // host side cuda driver events
++ CPU_INSTANT_EVENT, // host side point-like events
++ PYTHON_FUNCTION,
++ OVERHEAD, // CUPTI induced overhead events sampled from its overhead API.
++
++ // Optional Activity types
++ CUDA_SYNC, // synchronization events between runtime and kernels
++ GLOW_RUNTIME, // host side glow runtime events
++ MTIA_RUNTIME, // host side MTIA runtime events
++ CUDA_PROFILER_RANGE, // CUPTI Profiler range for performance metrics
++ MTIA_CCP_EVENTS, // MTIA ondevice CCP events
++ HPU_OP, // HPU host side runtime event
++ XPU_RUNTIME, // host side xpu runtime events
++
++ ENUM_COUNT, // This is to add buffer and not used for any profiling logic. Add your new type before it.
++ OPTIONAL_ACTIVITY_TYPE_START = CUDA_SYNC,
++};
++}
++
++#endif
+
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/profiler/api.h>
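
For context, a minimal sketch (not part of the patch) of the pattern the patch
relies on: when USE_KINETO is undefined, a local stub of libkineto::ActivityType
stands in for the real <ActivityType.h> header, so profiler code that names the
type still compiles. The prepare_trace function below is a hypothetical stand-in
for PyTorch's profiler entry points, not actual PyTorch API; only the enum
members shown in the patch are taken from the source.

// Build without -DUSE_KINETO to exercise the fallback branch.
#include <iostream>
#include <set>

#ifndef USE_KINETO
namespace libkineto {
// Trimmed-down stand-in for the enum normally provided by <ActivityType.h>.
enum class ActivityType {
  CPU_OP = 0,
  USER_ANNOTATION,
  CUDA_RUNTIME,
  ENUM_COUNT,
};
} // namespace libkineto
#endif

// Hypothetical profiler entry point that mentions ActivityType in its
// signature; with the stub in place it compiles even when Kineto is absent.
void prepare_trace(const std::set<libkineto::ActivityType>& activities) {
#ifdef USE_KINETO
  // A real build would hand the requested activity set to Kineto here.
#else
  std::cout << "Kineto disabled; ignoring " << activities.size()
            << " requested activity types\n";
#endif
}

int main() {
  prepare_trace({libkineto::ActivityType::CPU_OP,
                 libkineto::ActivityType::CUDA_RUNTIME});
}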