diff --git a/CHANGELOG.md b/CHANGELOG.md
index 14d62c92c3e..6d6845c4ad8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,7 +4,7 @@
* Refactor and improve presets for PyTorch ([pull #1360](https://github.com/bytedeco/javacpp-presets/pull/1360))
* Include `mkl_lapack.h` header file in presets for MKL ([issue #1388](https://github.com/bytedeco/javacpp-presets/issues/1388))
* Map new higher-level C++ API of Triton Inference Server ([pull #1361](https://github.com/bytedeco/javacpp-presets/pull/1361))
- * Upgrade presets for OpenCV 4.8.1, FFmpeg 6.1, HDF5 1.14.3, DNNL 3.3.2, OpenBLAS 0.3.25, ARPACK-NG 3.9.1, CPython 3.12.0, NumPy 1.26.2, SciPy 1.11.4, LLVM 17.0.6, Leptonica 1.83.1, Tesseract 5.3.3, CUDA 12.3.0, cuDNN 8.9.5, NCCL 2.18.5, PyTorch 2.1.1 ([pull #1426](https://github.com/bytedeco/javacpp-presets/pull/1426)), TensorFlow Lite 2.15.0, Triton Inference Server 2.38.0, DepthAI 2.23.0, ONNX 1.15.0, ONNX Runtime 1.16.3, TVM 0.14.0, and their dependencies
+ * Upgrade presets for OpenCV 4.8.1, FFmpeg 6.1, HDF5 1.14.3, DNNL 3.3.2, OpenBLAS 0.3.25, ARPACK-NG 3.9.1, CPython 3.12.0, NumPy 1.26.2, SciPy 1.11.4, LLVM 17.0.6, Leptonica 1.83.1, Tesseract 5.3.3, CUDA 12.3.0, cuDNN 8.9.5, NCCL 2.18.5, PyTorch 2.1.2 ([pull #1426](https://github.com/bytedeco/javacpp-presets/pull/1426)), TensorFlow Lite 2.15.0, Triton Inference Server 2.38.0, DepthAI 2.23.0, ONNX 1.15.0, ONNX Runtime 1.16.3, TVM 0.14.0, and their dependencies
### June 6, 2023 version 1.5.9
* Virtualize `nvinfer1::IGpuAllocator` from TensorRT to allow customization ([pull #1367](https://github.com/bytedeco/javacpp-presets/pull/1367))
diff --git a/platform/pom.xml b/platform/pom.xml
index 1cd763169c6..7a79f0fdeb5 100644
--- a/platform/pom.xml
+++ b/platform/pom.xml
@@ -292,7 +292,7 @@
       <groupId>org.bytedeco</groupId>
       <artifactId>pytorch-platform</artifactId>
-      <version>2.1.1-${project.version}</version>
+      <version>2.1.2-${project.version}</version>
org.bytedeco
diff --git a/pytorch/README.md b/pytorch/README.md
index ef7af9f14e2..dae14de1aed 100644
--- a/pytorch/README.md
+++ b/pytorch/README.md
@@ -9,7 +9,7 @@ Introduction
------------
This directory contains the JavaCPP Presets module for:
- * PyTorch 2.1.1 https://pytorch.org/
+ * PyTorch 2.1.2 https://pytorch.org/
Please refer to the parent README.md file for more detailed information about the JavaCPP Presets.
@@ -48,14 +48,14 @@ We can use [Maven 3](http://maven.apache.org/) to download and install automatic
       <groupId>org.bytedeco</groupId>
       <artifactId>pytorch-platform</artifactId>
-      <version>2.1.1-1.5.10-SNAPSHOT</version>
+      <version>2.1.2-1.5.10-SNAPSHOT</version>
       <groupId>org.bytedeco</groupId>
       <artifactId>pytorch-platform-gpu</artifactId>
-      <version>2.1.1-1.5.10-SNAPSHOT</version>
+      <version>2.1.2-1.5.10-SNAPSHOT</version>
diff --git a/pytorch/cppbuild.sh b/pytorch/cppbuild.sh
index 4ef4ef3cde4..acf51c1b65b 100755
--- a/pytorch/cppbuild.sh
+++ b/pytorch/cppbuild.sh
@@ -35,7 +35,7 @@ if [[ $PLATFORM == windows* ]]; then
export PYTHON_BIN_PATH=$(which python.exe)
fi
-PYTORCH_VERSION=2.1.1
+PYTORCH_VERSION=2.1.2
export PYTORCH_BUILD_VERSION="$PYTORCH_VERSION"
export PYTORCH_BUILD_NUMBER=1
diff --git a/pytorch/platform/gpu/pom.xml b/pytorch/platform/gpu/pom.xml
index c3cf9d9b37e..ddbc71f460e 100644
--- a/pytorch/platform/gpu/pom.xml
+++ b/pytorch/platform/gpu/pom.xml
@@ -12,7 +12,7 @@
   <groupId>org.bytedeco</groupId>
   <artifactId>pytorch-platform-gpu</artifactId>
-  <version>2.1.1-${project.parent.version}</version>
+  <version>2.1.2-${project.parent.version}</version>
   <name>JavaCPP Presets Platform GPU for PyTorch</name>
diff --git a/pytorch/platform/pom.xml b/pytorch/platform/pom.xml
index 24b185fc4dc..f16b282f403 100644
--- a/pytorch/platform/pom.xml
+++ b/pytorch/platform/pom.xml
@@ -12,7 +12,7 @@
   <groupId>org.bytedeco</groupId>
   <artifactId>pytorch-platform</artifactId>
-  <version>2.1.1-${project.parent.version}</version>
+  <version>2.1.2-${project.parent.version}</version>
   <name>JavaCPP Presets Platform for PyTorch</name>
diff --git a/pytorch/pom.xml b/pytorch/pom.xml
index 1f31a727def..74fc1813363 100644
--- a/pytorch/pom.xml
+++ b/pytorch/pom.xml
@@ -11,7 +11,7 @@
   <groupId>org.bytedeco</groupId>
   <artifactId>pytorch</artifactId>
-  <version>2.1.1-${project.parent.version}</version>
+  <version>2.1.2-${project.parent.version}</version>
   <name>JavaCPP Presets for PyTorch</name>
diff --git a/pytorch/samples/pom.xml b/pytorch/samples/pom.xml
index cc995393235..89ca1a20dd1 100644
--- a/pytorch/samples/pom.xml
+++ b/pytorch/samples/pom.xml
@@ -12,14 +12,14 @@
       <groupId>org.bytedeco</groupId>
       <artifactId>pytorch-platform</artifactId>
-      <version>2.1.1-1.5.10-SNAPSHOT</version>
+      <version>2.1.2-1.5.10-SNAPSHOT</version>
       <groupId>org.bytedeco</groupId>
       <artifactId>pytorch-platform-gpu</artifactId>
-      <version>2.1.1-1.5.10-SNAPSHOT</version>
+      <version>2.1.2-1.5.10-SNAPSHOT</version>
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceStats.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceStats.java
index ec8ed6dedb4..c745ac42a2f 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceStats.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceStats.java
@@ -42,25 +42,25 @@ public class DeviceStats extends Pointer {
}
// COUNT: allocations requested by client code
- public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") BoolPointer allocation(); public native DeviceStats allocation(BoolPointer setter);
+ public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") Stat allocation(); public native DeviceStats allocation(Stat setter);
// COUNT: number of allocated segments from cudaMalloc().
- public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") BoolPointer segment(); public native DeviceStats segment(BoolPointer setter);
+ public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") Stat segment(); public native DeviceStats segment(Stat setter);
// COUNT: number of active memory blocks (allocated or used by stream)
- public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") BoolPointer active(); public native DeviceStats active(BoolPointer setter);
+ public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") Stat active(); public native DeviceStats active(Stat setter);
// COUNT: number of inactive, split memory blocks (unallocated but can't be
// released via cudaFree)
- public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") BoolPointer inactive_split(); public native DeviceStats inactive_split(BoolPointer setter);
+ public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") Stat inactive_split(); public native DeviceStats inactive_split(Stat setter);
// SUM: bytes allocated by this memory alocator
- public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") BoolPointer allocated_bytes(); public native DeviceStats allocated_bytes(BoolPointer setter);
+ public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") Stat allocated_bytes(); public native DeviceStats allocated_bytes(Stat setter);
// SUM: bytes reserved by this memory allocator (both free and used)
- public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") BoolPointer reserved_bytes(); public native DeviceStats reserved_bytes(BoolPointer setter);
+ public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") Stat reserved_bytes(); public native DeviceStats reserved_bytes(Stat setter);
// SUM: bytes within active memory blocks
- public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") BoolPointer active_bytes(); public native DeviceStats active_bytes(BoolPointer setter);
+ public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") Stat active_bytes(); public native DeviceStats active_bytes(Stat setter);
// SUM: bytes within inactive, split memory blocks
- public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") BoolPointer inactive_split_bytes(); public native DeviceStats inactive_split_bytes(BoolPointer setter);
+ public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") Stat inactive_split_bytes(); public native DeviceStats inactive_split_bytes(Stat setter);
// SUM: bytes requested by client code
- public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") BoolPointer requested_bytes(); public native DeviceStats requested_bytes(BoolPointer setter);
+ public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") Stat requested_bytes(); public native DeviceStats requested_bytes(Stat setter);
// COUNT: total number of failed calls to CUDA malloc necessitating cache
// flushes.
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java
index 69a10faaac4..296eb21d819 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java
@@ -76579,11 +76579,11 @@ scalar_t sf(scalar_t x, scalar_t y)
public static final int TORCH_VERSION_MINOR = 1;
/** Indicates the patch version of LibTorch. */
-public static final int TORCH_VERSION_PATCH = 1;
+public static final int TORCH_VERSION_PATCH = 2;
/** Indicates the version of LibTorch. */
public static final String TORCH_VERSION =
- "2.1.1";
+ "2.1.2";
// Parsed from torch/csrc/autograd/InferenceMode.h
diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java
index ad06117d870..2cf50c171b6 100644
--- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java
+++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java
@@ -104,6 +104,9 @@ public void map(InfoMap infoMap) {
             .put(new Info("std::vector<c10::cuda::CUDAKernelLaunchInfo>").pointerTypes("CUDAKernelLaunchInfoVector").define())
             .put(new Info("const std::vector<c10::cuda::CUDACachingAllocator::TraceEntry>", "std::vector<c10::cuda::CUDACachingAllocator::TraceEntry>").pointerTypes("TraceEntryVector").define())
 
+            //// std::array
+            .put(new Info("std::array<c10::cuda::CUDACachingAllocator::Stat,static_cast<size_t>(c10::cuda::CUDACachingAllocator::StatType::NUM_TYPES)>", "c10::cuda::CUDACachingAllocator::StatArray").cast().pointerTypes("Stat"))
+
//// Function pointers
// Function pointer returning shared_ptr don't compile on windows
// "D:\a\javacpp-presets\javacpp-presets\pytorch\target\native\org\bytedeco\pytorch\windows-x86_64\jnitorch.cpp(98904): error C2526: 'JavaCPP_org_bytedeco_pytorch_functions_GatheredContextSupplier_allocate_callback': C linkage function cannot return C++ class 'std::shared_ptr'"