80 commits
585502d
Created initial code for loading fused_dense module dynamically inste…
amd-sriram Jun 26, 2025
d6e0ee4
add apex/git_version_info_installed.py to gitignore as it is dynamica…
amd-sriram Jun 26, 2025
cb0c9ab
add code for building fused rope dynamically
amd-sriram Jun 26, 2025
f129b0d
add code for building fused bias swiglu dynamically
amd-sriram Jun 26, 2025
be60325
fix the code so that fused rope and fused softmax are not compiled in…
amd-sriram Jun 27, 2025
7b9276c
load the jit modules inside and this prevents them from building when…
amd-sriram Jun 30, 2025
eea4c0f
convert syncbn module to jit
amd-sriram Jul 3, 2025
d6ad398
fix the unnecessary compile of syncbn module in wheel building due to…
amd-sriram Jul 3, 2025
497f54a
add fused layer norm module to jit build
amd-sriram Jul 3, 2025
12222eb
make focal loss module as jit module
amd-sriram Jul 3, 2025
1a72cb0
make focal loss module as jit module
amd-sriram Jul 3, 2025
5ee6115
make xentropy module as jit module
amd-sriram Jul 3, 2025
6533731
make bpn module as jit module
amd-sriram Jul 3, 2025
4a1a8f8
add code to build individual extensions without JIT
amd-sriram Jul 17, 2025
01f22cd
clean up the flags for the modules based on apex/setup.py
amd-sriram Jul 17, 2025
58d87ad
add function to get the backward_pass_guard_args in CudaOpBuilder and…
amd-sriram Jul 17, 2025
d47d871
add fused weight gradient mlp to jit compile
amd-sriram Jul 17, 2025
fc60c28
move fused_weight_gradient_mlp_cuda load inside so that it is not com…
amd-sriram Jul 17, 2025
ad7439a
make fused index mul 2d jit compile and dd aten atomic header flag me…
amd-sriram Jul 17, 2025
b2a26fb
make fast multihead attention as jit module, add generator_args to Cu…
amd-sriram Jul 17, 2025
8acc5f5
make transducer loss and transducer joint modules as jit modules, add…
amd-sriram Jul 17, 2025
1718d3a
remove extra method - installed_cuda_version from CUDAOpBuilder
amd-sriram Jul 17, 2025
844c8d4
add apex_C module to jit compile, add py-cpuinfo to requirements.txt …
amd-sriram Jul 17, 2025
08939ea
make nccl allocator as a jit compile module, add nccl_args method to …
amd-sriram Jul 17, 2025
fb451c9
make amp_C as a jit module
amd-sriram Jul 17, 2025
c6daabd
add a few uses of amp_C jit module
amd-sriram Jul 17, 2025
b221825
add a few uses of amp_C jit module
amd-sriram Jul 17, 2025
8973402
make fused adam as a jit module
amd-sriram Jul 17, 2025
3b38cb8
add a few uses of amp_C jit module
amd-sriram Jul 17, 2025
2d29c4c
fix the issue with fused adam jit module
amd-sriram Jul 17, 2025
742c3b3
make fused lamb as jit module
amd-sriram Jul 17, 2025
a73b33d
make distributed adam as jit module
amd-sriram Jul 17, 2025
bc7c56e
make distributed lamb as jit module
amd-sriram Jul 17, 2025
ac684d5
add remaining amp_C uses with jit loader
amd-sriram Jul 18, 2025
18ba696
add remaining usage of apexC jit module
amd-sriram Jul 22, 2025
e79a028
make nccl p2p module as jit compile
amd-sriram Jul 22, 2025
3bd3045
make peer memory module as jit compile
amd-sriram Jul 22, 2025
8ba059e
add code to check for minimum nccl version to compile nccl allocator …
amd-sriram Jul 22, 2025
4f417e6
add provision to provide APEX_CPP_OPS=1 and APEX_CUDA_OPS=1 as repla…
amd-sriram Jul 22, 2025
00d66d4
check for minimum torch version for nccl allocator, check if the modu…
amd-sriram Jul 22, 2025
705f675
add build as a dependency to support wheel building
amd-sriram Jul 24, 2025
783fbde
Replace is_compatible to check for installation conditions with is_su…
amd-sriram Jul 24, 2025
c366417
Similar to pytorch we create a make command to install aiter, that th…
amd-sriram Jul 24, 2025
43632d7
update extension import test so that it considers jit compile extensions
amd-sriram Jul 24, 2025
8118f21
clean up MultiTensorApply usages so that amp_C is not build in jit co…
amd-sriram Jul 26, 2025
bae9e71
Adding missing modules from deepspeed repo. Remove extra code in setu…
amd-sriram Jul 28, 2025
2b86e01
change name of apex_C module
amd-sriram Jul 28, 2025
b1b439d
change the name of cpp and cuda build flags, remove APEX_BUILD_OPS, c…
amd-sriram Jul 29, 2025
ab7fbd7
add missing files used in cpu accelerator
amd-sriram Jul 30, 2025
178c5fc
add make clean command to handle deleting torch extensions installed …
amd-sriram Jul 30, 2025
4f61ab3
remove unused code in setup.py, fix the code to build for cpu mode
amd-sriram Aug 5, 2025
56950da
Removing unused code
amd-sriram Aug 5, 2025
327c8cf
remove accelerator package and refactor the used code into op_builder…
amd-sriram Aug 5, 2025
6130493
remove accelerator package usages
amd-sriram Aug 5, 2025
4de63aa
revert code that was removed by mistake
amd-sriram Aug 5, 2025
223ab1d
Cleaning up the setup file and renaming functions and variable to mor…
amd-sriram Aug 6, 2025
954e7ce
Fix the nccl version so that the nccl_allocator.so file can be loaded…
amd-sriram Aug 12, 2025
87ae01c
Restore to original importing the extension code.
amd-sriram Aug 12, 2025
c21c31a
renamed compatibility/scaled_masked_softmax_cuda.py, added some extra…
amd-sriram Aug 13, 2025
7d2bb4c
Added instructions for JIT load and changes in installation options
amd-sriram Aug 13, 2025
f80e434
Restructuring the README
amd-sriram Aug 13, 2025
4b4b774
Added instructions for building wheel
amd-sriram Aug 13, 2025
71f9d67
replaced TorchCPUBuilder with CPUBuilder, added a main method in cont…
amd-sriram Oct 31, 2025
a569854
create a script to build different jit conditions for running differe…
amd-sriram Nov 10, 2025
f263567
add script to run tests with different jit builds, add instructions t…
amd-sriram Nov 11, 2025
cdf3a31
fix the issues with running the tests - improper paths, counting .so …
amd-sriram Nov 18, 2025
22b5340
add mad internal scripts
amd-sriram Nov 18, 2025
d569d5d
remove print statement
amd-sriram Nov 18, 2025
84ccba8
remove testing section from readme
amd-sriram Nov 18, 2025
a60e200
change location of result file
amd-sriram Nov 19, 2025
5df477c
remove multiple results file from models.json
amd-sriram Nov 19, 2025
c758841
add platform specific description to wheel name even if no CppExtensi…
amd-sriram Nov 22, 2025
484358c
add ninja and wheel to requirements to be installed
amd-sriram Nov 25, 2025
6388f5a
Update Release notes in Readme
amd-sriram Nov 25, 2025
929f4ad
Exclude compatibility folder while installing apex
amd-sriram Nov 25, 2025
e16c45b
Update README.md
jithunnair-amd Nov 26, 2025
b52cb46
Update README.md
jithunnair-amd Nov 26, 2025
3f8f4fd
Update README.md
jithunnair-amd Nov 26, 2025
2c6378a
Merge branch 'master' into Refactor_build
jithunnair-amd Nov 26, 2025
5920a1b
Adding modification note to the original copywrite
sriram-siloai Dec 1, 2025
4 changes: 4 additions & 0 deletions .gitignore
@@ -148,3 +148,7 @@ cython_debug/
*.hip
*_hip.*
*hip*


# file temporarily created for the build process
apex/git_version_info_installed.py
2 changes: 2 additions & 0 deletions MANIFEST.in
@@ -0,0 +1,2 @@
recursive-include apex/contrib/csrc *
recursive-include apex/csrc *
17 changes: 17 additions & 0 deletions Makefile
@@ -0,0 +1,17 @@
PYTHON = python3
PIP = $(PYTHON) -m pip

clean: # This will remove ALL build folders.
	@test -d build/ && echo "Deleting build folder" || true
	@test -d build/ && rm -r build/ || true
	@test -d dist/ && echo "Deleting dist folder" || true
	@test -d dist/ && rm -r dist/ || true
	@test -d apex.egg-info/ && echo "Deleting apex.egg-info folder" || true
	@test -d apex.egg-info/ && rm -r apex.egg-info/ || true
	$(PYTHON) scripts/clean.py # remove the apex extensions installed at the torch extensions folder

aiter:
	$(PIP) uninstall -y aiter
	cd third_party/aiter && $(PIP) install . --no-build-isolation --no-deps

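`scripts/clean.py` itself is not shown in this diff; the following is a minimal sketch, assuming its job is to drop the torch-extensions JIT cache that the `clean` target mentions (the real script may do more):

```python
import os
import shutil

# torch.utils.cpp_extension.load() caches JIT builds under this directory
# unless TORCH_EXTENSIONS_DIR points elsewhere. This is a hypothetical
# stand-in for scripts/clean.py, not its actual contents.
root = os.environ.get(
    "TORCH_EXTENSIONS_DIR",
    os.path.join(os.path.expanduser("~"), ".cache", "torch_extensions"),
)
if os.path.isdir(root):
    print(f"Deleting {root}")
    shutil.rmtree(root)
```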
109 changes: 71 additions & 38 deletions README.md
@@ -100,24 +100,21 @@ Note that we recommend restoring the model using the same `opt_level`. Also note
# Installation

## Containers
-ROCm pytorch containers are available from https://hub.docker.com/r/rocm/pytorch.
+ROCm PyTorch containers ship with the apex package and are available from https://hub.docker.com/r/rocm/pytorch.

## From Source

-To install Apex from source, we recommend using the nightly Pytorch obtainable from https://github.com/rocm/pytorch.
-The latest stable release obtainable from https://pytorch.org should also work.
+Torch must be installed before installing apex. We recommend using the nightly PyTorch obtainable from https://github.com/rocm/pytorch. The latest stable release obtainable from https://pytorch.org should also work.

## ROCm
Apex on ROCm supports both python-only builds and extension builds.
Note: PyTorch >= 1.5 is recommended for extension builds.

-### To install using python only build use the following command in apex folder:
+### The following command installs all the extensions so that they are built and linked at runtime using [PyTorch's JIT (just-in-time) loader](https://pytorch.org/docs/stable/cpp_extension.html):
+This requires `ninja` to be installed.
```
-python setup.py install
+pip install . --no-build-isolation
```

-=======
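For background, the runtime path goes through PyTorch's JIT extension loader; here is a minimal self-contained sketch of that mechanism (illustrative names only, not apex's actual builder code):

```python
# torch.utils.cpp_extension.load compiles with ninja on first use and caches
# the resulting .so; later loads reuse the cache. "my_ext" and "my_ext.cpp"
# are placeholders, not files in this repository.
from torch.utils.cpp_extension import load

my_ext = load(
    name="my_ext",            # cached under ~/.cache/torch_extensions (or TORCH_EXTENSIONS_DIR)
    sources=["my_ext.cpp"],
    verbose=True,             # prints the ninja build log on the first load
)
```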
### Supported Versions
| ``APEX Version`` | ``APEX branch`` | ``Torch Version`` |
|------------------|-----------------|-------------------|
@@ -140,26 +137,73 @@ ubuntu|pytorch|apex|release/1.0.0|06c33eee43f7a22f3ed7d9c3e5be0ddd757dc345|https://github.com/ROCmSoftwarePlatform/apex
centos|pytorch|apex|release/1.0.0|06c33eee43f7a22f3ed7d9c3e5be0ddd757dc345|https://github.com/ROCmSoftwarePlatform/apex
```

-### To install using extensions enabled use the following command in apex folder:
+### To pre-build and install all the supported extensions while installing apex, use the following command in the apex folder:
-# if pip >= 23.1 (ref: https://pip.pypa.io/en/stable/news/#v23-1) which supports multiple `--config-settings` with the same key...
-pip install -v --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" ./
-# otherwise
-python setup.py install --cpp_ext --cuda_ext
```
APEX_BUILD_CPP_OPS=1 APEX_BUILD_CUDA_OPS=1 pip install . --no-build-isolation
```

It is also possible to pre-build and install specific extensions by using the following command in the apex folder:
```
APEX_BUILD_<OP_NAME>=1 pip install . --no-build-isolation
```
-Note that using --cuda_ext flag to install Apex will also enable all the extensions supported on ROCm including "--distributed_adam", "--distributed_lamb", "--bnp", "--xentropy", "--deprecated_fused_adam", "--deprecated_fused_lamb", and "--fast_multihead_attn".
The following extensions are supported:
| extension | environment variable to pre-build this extension | umbrella option that also builds it |
|-----------|-----------|-----------|
| amp_C | APEX_BUILD_AMP_C=1 | APEX_BUILD_CUDA_OPS=1 |
| apex_C | APEX_BUILD_APEX_C=1 | APEX_BUILD_CPP_OPS=1 |
| bnp | APEX_BUILD_BNP=1 | APEX_BUILD_CUDA_OPS=1 |
| distributed_adam_cuda | APEX_BUILD_DISTRIBUTED_ADAM=1 | APEX_BUILD_CUDA_OPS=1 |
| distributed_lamb_cuda | APEX_BUILD_DISTRIBUTED_LAMB=1 | APEX_BUILD_CUDA_OPS=1 |
| fast_multihead_attn | APEX_BUILD_FAST_MULTIHEAD_ATTN=1 | APEX_BUILD_CUDA_OPS=1 |
| focal_loss_cuda | APEX_BUILD_FOCAL_LOSS=1 | APEX_BUILD_CUDA_OPS=1 |
| fused_adam_cuda | APEX_BUILD_FUSED_ADAM=1 | APEX_BUILD_CUDA_OPS=1 |
| fused_bias_swiglu | APEX_BUILD_FUSED_BIAS_SWIGLU=1 | APEX_BUILD_CUDA_OPS=1 |
| fused_dense_cuda | APEX_BUILD_FUSED_DENSE=1 | APEX_BUILD_CUDA_OPS=1 |
| fused_index_mul_2d | APEX_BUILD_FUSED_INDEX_MUL_2D=1 | APEX_BUILD_CUDA_OPS=1 |
| fused_lamb_cuda | APEX_BUILD_FUSED_LAMB=1 | APEX_BUILD_CUDA_OPS=1 |
| fused_layer_norm_cuda | APEX_BUILD_FUSED_LAYER_NORM=1 | APEX_BUILD_CUDA_OPS=1 |
| fused_rotary_positional_embedding | APEX_BUILD_FUSED_ROPE=1 | APEX_BUILD_CUDA_OPS=1 |
| fused_weight_gradient_mlp_cuda | APEX_BUILD_FUSED_WEIGHT_GRADIENT_MLP=1 | APEX_BUILD_CUDA_OPS=1 |
| generic_scaled_masked_softmax_cuda | APEX_BUILD_GENERIC_SCALED_MASKED_SOFTMAX_CUDA=1 | APEX_BUILD_CUDA_OPS=1 |
| mlp_cuda | APEX_BUILD_MLP=1 | APEX_BUILD_CUDA_OPS=1 |
| _apex_nccl_allocator | APEX_BUILD_NCCL_ALLOCATOR=1 | APEX_BUILD_CUDA_OPS=1 |
| nccl_p2p_cuda | APEX_BUILD_NCCL_P2P=1 | APEX_BUILD_CUDA_OPS=1 |
| peer_memory_cuda | APEX_BUILD_PEER_MEMORY=1 | APEX_BUILD_CUDA_OPS=1 |
| scaled_masked_softmax_cuda | APEX_BUILD_SCALED_MASKED_SOFTMAX_CUDA=1 | APEX_BUILD_CUDA_OPS=1 |
| scaled_softmax_cuda | APEX_BUILD_SCALED_SOFTMAX_CUDA=1 | APEX_BUILD_CUDA_OPS=1 |
| scaled_upper_triang_masked_softmax_cuda | APEX_BUILD_SCALED_UPPER_TRIANG_MASKED_SOFTMAX_CUDA=1 | APEX_BUILD_CUDA_OPS=1 |
| syncbn | APEX_BUILD_SYNCBN=1 | APEX_BUILD_CUDA_OPS=1 |
| transducer_joint_cuda | APEX_BUILD_TRANSDUCER_JOINT=1 | APEX_BUILD_CUDA_OPS=1 |
| transducer_loss_cuda | APEX_BUILD_TRANSDUCER_LOSS=1 | APEX_BUILD_CUDA_OPS=1 |
| xentropy_cuda | APEX_BUILD_XENTROPY=1 | APEX_BUILD_CUDA_OPS=1 |

For example, to pre-build FUSED_DENSE you can use the following command:
```
APEX_BUILD_FUSED_DENSE=1 pip install . --no-build-isolation
```
This pre-builds and installs the FUSED_DENSE module; the rest of the modules are JIT-built and loaded at runtime.
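One hedged way to check which install mode you ended up with is to time the first import; note that with the lazy compatibility shims (see the `compatibility/` files below) a JIT build may be deferred until first use rather than import:

```python
import time

start = time.time()
import fused_dense_cuda  # may be the pre-built .so, or a lazy shim that builds later
elapsed = time.time() - start
print(f"import took {elapsed:.2f}s (a JIT build, if any, may only start on first use)")
```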


-In addition, aiter backend can be built during apex installation by providing --aiter flag
-# if pip >= 23.1 (ref: https://pip.pypa.io/en/stable/news/#v23-1) which supports multiple `--config-settings` with the same key...
-pip install -v --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" --config-settings "--build-option=--aiter" ./
-# otherwise
-python setup.py install --cpp_ext --cuda_ext --aiter

+The aiter backend can be built and used for fused RoPE. To install aiter:
```
make aiter
```

To use aiter for fused RoPE, set the environment variable `USE_ROCM_AITER_ROPE_BACKEND=1`.
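The flag is a plain environment variable; a hedged illustration of the kind of check it implies follows (the actual predicate inside apex may differ in detail):

```python
import os

# "1" enables the aiter fused-RoPE backend per the README above; anything
# else falls back to apex's own fused rotary-embedding kernels.
use_aiter_rope = os.environ.get("USE_ROCM_AITER_ROPE_BACKEND", "0") == "1"
print("rope backend:", "aiter" if use_aiter_rope else "native")
```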

### To create a wheel and then install apex from it, use the following commands in the apex folder:
```
# The same APEX_BUILD_* environment variables can be used here to pre-build
# specific extensions, all cpp extensions, or all cuda extensions.
python -m build --wheel --no-isolation
pip install dist/apex-*.whl
```

### To uninstall apex and its extensions, use the following commands in the apex folder:
```
pip uninstall apex
make clean
```

### Enable hipblasLT on ROCm
hipblasLT is supported only on MI300 (gfx942).
python setup.py automatically builds apex with hipblasLT support only if the GPU device ID is gfx942
@@ -173,33 +217,22 @@ CUDA and C++ extensions via
```bash
git clone https://github.com/rocm/apex
cd apex
-# if pip >= 23.1 (ref: https://pip.pypa.io/en/stable/news/#v23-1) which supports multiple `--config-settings` with the same key...
-pip install -v --disable-pip-version-check --no-cache-dir --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" ./
-# otherwise
-pip install -v --disable-pip-version-check --no-cache-dir --no-build-isolation --global-option="--cpp_ext" --global-option="--cuda_ext" ./
```

Apex also supports a Python-only build via
```bash
-pip install -v --disable-pip-version-check --no-build-isolation --no-cache-dir ./
+pip install . --no-build-isolation
```
A Python-only build omits:
- Fused kernels required to use `apex.optimizers.FusedAdam`.
- Fused kernels required to use `apex.normalization.FusedLayerNorm` and `apex.normalization.FusedRMSNorm`.
- Fused kernels that improve the performance and numerical stability of `apex.parallel.SyncBatchNorm`.
- Fused kernels that improve the performance of `apex.parallel.DistributedDataParallel` and `apex.amp`.
`DistributedDataParallel`, `amp`, and `SyncBatchNorm` will still be usable, but they may be slower.


### [Experimental] Windows
-`pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" .` may work if you were able to build Pytorch from source
-on your system. A Python-only build via `pip install -v --no-cache-dir .` is more likely to work.
+`pip install . --no-build-isolation` may work if you were able to build PyTorch from source
+on your system. A Python-only build via `pip install --no-build-isolation -v --no-cache-dir .` is more likely to work.
If you installed Pytorch in a Conda environment, make sure to install Apex in that same environment.


# Release notes

## release/1.10.0

Build and installation related:
- Support JIT (just-in-time) loading of C++ and CUDA extensions

## release/1.9.0

- No new features were added in this release cycle.
26 changes: 14 additions & 12 deletions apex/contrib/test/run_rocm_extensions.py
@@ -2,25 +2,27 @@
import sys


-test_dirs = ["groupbn", "fused_dense", "layer_norm", "multihead_attn", "transducer", "focal_loss", "index_mul_2d", "."] # "." for test_label_smoothing.py
+test_dirs = ["groupbn", "layer_norm", "multihead_attn", "transducer", "focal_loss", "index_mul_2d", ".", \
+             "optimizers", "clip_grad"] # "." for test_label_smoothing.py
 ROCM_BLACKLIST = [
     "layer_norm"
 ]

-runner = unittest.TextTestRunner(verbosity=2)
+if __name__ == '__main__':
+    runner = unittest.TextTestRunner(verbosity=2)

-errcode = 0
+    errcode = 0

-for test_dir in test_dirs:
-    if test_dir in ROCM_BLACKLIST:
-        continue
-    suite = unittest.TestLoader().discover(test_dir)
+    for test_dir in test_dirs:
+        if test_dir in ROCM_BLACKLIST:
+            continue
+        suite = unittest.TestLoader().discover(test_dir)

-    print("\nExecuting tests from " + test_dir)
+        print("\nExecuting tests from " + test_dir)

-    result = runner.run(suite)
+        result = runner.run(suite)

-    if not result.wasSuccessful():
-        errcode = 1
+        if not result.wasSuccessful():
+            errcode = 1

-sys.exit(errcode)
+    sys.exit(errcode)
1 change: 1 addition & 0 deletions apex/csrc
34 changes: 34 additions & 0 deletions apex/git_version_info.py
@@ -0,0 +1,34 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

# Portions of this code were adapted from DeepSpeed:
# https://github.com/microsoft/DeepSpeed
# Modified for ROCm Apex

try:
    # This is populated by setup.py
    from .git_version_info_installed import *  # noqa: F401 # type: ignore
except ModuleNotFoundError:
    import os

    if os.path.isfile('version.txt'):
        # Will be missing from checkouts that haven't been installed (e.g., readthedocs)
        version = open('version.txt', 'r').read().strip()
    else:
        version = "0.0.0"
    git_hash = '[none]'
    git_branch = '[none]'

    from .op_builder.all_ops import ALL_OPS
    installed_ops = dict.fromkeys(ALL_OPS.keys(), False)
    torch_info = {'version': "0.0", "cuda_version": "0.0", "hip_version": "0.0"}

# compatible_ops list is recreated for each launch
from .op_builder.all_ops import ALL_OPS

compatible_ops = dict.fromkeys(ALL_OPS.keys(), False)
for op_name, builder in ALL_OPS.items():
    op_compatible = builder.is_compatible()
    compatible_ops[op_name] = op_compatible
compatible_ops["apex_not_implemented"] = False
1 change: 1 addition & 0 deletions apex/op_builder
Empty file added compatibility/__init__.py
37 changes: 37 additions & 0 deletions compatibility/_apex_nccl_allocator.py
@@ -0,0 +1,37 @@
import sys
import importlib

class _ApexNcclAllocatorModule:
    def __init__(self):
        self._loaded_module = None
        self._loading = False

    def _load_module(self):
        if self._loaded_module is None and not self._loading:
            self._loading = True
            try:
                apex_op_builder = importlib.import_module('apex.op_builder')
                builder = getattr(apex_op_builder, 'NCCLAllocatorBuilder')
                self._loaded_module = builder().load()
            except Exception as e:
                raise ImportError(f"Failed to load _apex_nccl_allocator: {e}")
            finally:
                self._loading = False
        return self._loaded_module

    def __getattr__(self, name):
        if name.startswith("_") and name != "__class__":
            raise AttributeError(f"module _apex_nccl_allocator has no attribute '{name}'")
        return getattr(self._load_module(), name)

    def __dir__(self):
        try:
            return dir(self._load_module())
        except Exception:
            return []

    def __repr__(self):
        return "<module '_apex_nccl_allocator'>"

sys.modules[__name__] = _ApexNcclAllocatorModule()
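The amp_C and apex_C files below repeat this exact proxy pattern. For readers unfamiliar with it, here is a self-contained sketch that runs without apex (all names are illustrative):

```python
import sys
import types

class _LazyModule(types.ModuleType):
    """Defers an expensive import/build until the first attribute access."""

    def __init__(self, name, loader):
        super().__init__(name)
        self._loader = loader  # callable that performs the real (slow) load
        self._real = None

    def __getattr__(self, attr):
        # Only called for attributes not found normally, i.e. anything
        # belonging to the real module.
        if self._real is None:
            self._real = self._loader()  # in apex: builder().load() (JIT build)
        return getattr(self._real, attr)

def _slow_load():
    import math  # stand-in for a JIT-compiled extension
    return math

sys.modules["lazy_math"] = _LazyModule("lazy_math", _slow_load)

import lazy_math            # cheap: nothing has been built yet
print(lazy_math.sqrt(9.0))  # first attribute access triggers _slow_load()
```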
37 changes: 37 additions & 0 deletions compatibility/amp_C.py
@@ -0,0 +1,37 @@
import sys
import importlib

class _AmpCModule:
    def __init__(self):
        self._loaded_module = None
        self._loading = False

    def _load_module(self):
        if self._loaded_module is None and not self._loading:
            self._loading = True
            try:
                apex_op_builder = importlib.import_module('apex.op_builder')
                builder = getattr(apex_op_builder, 'AmpCBuilder')
                self._loaded_module = builder().load()
            except Exception as e:
                raise ImportError(f"Failed to load amp_C: {e}")
            finally:
                self._loading = False
        return self._loaded_module

    def __getattr__(self, name):
        if name.startswith("_"):
            raise AttributeError(f"module amp_C has no attribute '{name}'")
        return getattr(self._load_module(), name)

    def __dir__(self):
        try:
            return dir(self._load_module())
        except Exception:
            return []

    def __repr__(self):
        return "<module 'amp_C'>"

sys.modules[__name__] = _AmpCModule()
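A hedged sketch of the call-site pattern the commit history mentions ("clean up MultiTensorApply usages so that amp_C is not built..."): keep the amp_C import inside the function so that importing the caller never forces the JIT build. The helper name, chunk size, and argument layout below follow apex's MultiTensorApply convention as I understand it and are illustrative, not a verbatim apex excerpt:

```python
# Hypothetical call site: importing this file stays cheap; the first call
# resolves amp_C through the shim above and may trigger a JIT build then.
def scale_tensors(noop_flag, src_dst_lists, scale, chunk_size=2048 * 32):
    import amp_C  # deferred: no compilation at module-import time
    return amp_C.multi_tensor_scale(chunk_size, noop_flag, src_dst_lists, scale)
```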
37 changes: 37 additions & 0 deletions compatibility/apex_C.py
@@ -0,0 +1,37 @@
import sys
import importlib

class _ApexCModule:
    def __init__(self):
        self._loaded_module = None
        self._loading = False

    def _load_module(self):
        if self._loaded_module is None and not self._loading:
            self._loading = True
            try:
                apex_op_builder = importlib.import_module('apex.op_builder')
                builder = getattr(apex_op_builder, 'ApexCBuilder')
                self._loaded_module = builder().load()
            except Exception as e:
                raise ImportError(f"Failed to load apex_C: {e}")
            finally:
                self._loading = False
        return self._loaded_module

    def __getattr__(self, name):
        if name.startswith("_"):
            raise AttributeError(f"module apex_C has no attribute '{name}'")
        return getattr(self._load_module(), name)

    def __dir__(self):
        try:
            return dir(self._load_module())
        except Exception:
            return []

    def __repr__(self):
        return "<module 'apex_C'>"

sys.modules[__name__] = _ApexCModule()