Skip to content

Commit b63cbcc

Browse files
committed
Merge origin/main into cheese-head/rust-param-bindings
- Resolved merge conflict in src/bindings/rust/src/lib.rs - Applied clang-format to C++ files - All tests and formatting checks should pass
2 parents c4e25b7 + 6cf6818 commit b63cbcc

File tree

129 files changed

+6841
-5772
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

129 files changed

+6841
-5772
lines changed

.ci/dockerfiles/Dockerfile.gpu_test

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
# This Dockerfile creates a GPU-enabled test environment for NIXL (NVIDIA I/O eXchange Layer)
44
# development and testing. It provides a containerized environment with:
55
#
6-
# - NVIDIA PyTorch base image with CUDA support
6+
# - NVIDIA cuda-dl-base image with CUDA support
77
# - Non-root user setup for security
88
# - Sudo access for package installation and system configuration
99
# - Optimized for CI/CD pipeline testing
@@ -13,7 +13,7 @@
1313
# docker run --gpus all --privileged -it nixl-gpu-test
1414
#
1515
# Build arguments:
16-
# BASE_IMAGE: Base NVIDIA PyTorch image (default: nvcr.io/nvidia/pytorch:25.02-py3)
16+
# BASE_IMAGE: Base NVIDIA cuda-dl-base image (default: nvcr.io/nvidia/cuda-dl-base:25.06-cuda12.9-devel-ubuntu24.04)
1717
# _UID: User ID for the non-root user (default: 148069)
1818
# _GID: Group ID for the user (default: 30)
1919
# _LOGIN: Username (default: svc-nixl)
@@ -22,7 +22,7 @@
2222
# WORKSPACE: Workspace directory path
2323
#
2424

25-
ARG BASE_IMAGE=nvcr.io/nvidia/pytorch:25.02-py3
25+
ARG BASE_IMAGE=nvcr.io/nvidia/cuda-dl-base:25.06-cuda12.9-devel-ubuntu24.04
2626

2727
FROM ${BASE_IMAGE}
2828

@@ -32,7 +32,7 @@ ARG _GID=30
3232
ARG _LOGIN=svc-nixl
3333
ARG _GROUP=hardware
3434
ARG _HOME=/home/$_LOGIN
35-
ARG WORKSPACE
35+
ARG WORKSPACE=/workspace
3636

3737
# Labels for documentation
3838
LABEL maintainer="NVIDIA NIXL Team"
@@ -41,7 +41,7 @@ LABEL version="1.0"
4141

4242
# Update package list and install required packages in one layer
4343
RUN apt-get update && \
44-
apt-get install -y sudo \
44+
apt-get install -y sudo python3 python3-pip \
4545
&& apt-get clean \
4646
&& rm -rf /var/lib/apt/lists/*
4747

@@ -59,6 +59,9 @@ RUN mkdir -p /etc/sudoers.d && \
5959
chmod 440 /etc/sudoers.d/${_LOGIN} && \
6060
chown root:root /etc/sudoers.d/${_LOGIN}
6161

62+
# Create and set permissions for workspace directory
63+
RUN mkdir -p ${WORKSPACE} && chmod 777 ${WORKSPACE}
64+
6265
# Copy workspace into container (workaround for files disappearing from workspace)
6366
COPY --chown="${_UID}":"${_GID}" . ${WORKSPACE}
6467

.ci/docs/setup_nvidia_gpu_with_rdma_support_on_ubuntu.md

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -93,10 +93,16 @@ GDS is bundled with CUDA ≥11.4 but requires explicit enabling:
9393

9494
```bash
9595
sudo echo "options nvidia NVreg_EnableGpuDirectStorage=1" > /etc/modprobe.d/nvidia-gds.conf
96+
97+
# Configure PeerMappingOverride for GPU-Initiated RDMA support
98+
sudo echo "options nvidia NVreg_RegistryDwords=\"PeerMappingOverride=1;\"" > /etc/modprobe.d/nvidia.conf
99+
96100
sudo update-initramfs -u
97101
sudo reboot
98102
```
99103

104+
The `PeerMappingOverride=1` option is required for proper GPU peer-to-peer communication in RDMA environments.
105+
100106
### 6. **Enable Kernel Modules at Boot**
101107

102108
Add required modules to `/etc/modules`:
@@ -131,7 +137,7 @@ sudo nvidia-ctk runtime configure --runtime=docker
131137
sudo systemctl restart docker
132138
```
133139

134-
Verify GPU access in containers using `docker run --gpus all nvcr.io/nvidia/pytorch:25.02-py3 nvidia-smi`[^1_3].
140+
Verify GPU access in containers using `docker run --gpus all nvcr.io/nvidia/cuda-dl-base:25.06-cuda12.9-devel-ubuntu24.04 nvidia-smi`[^1_3].
135141

136142
### 9. **Validation and Troubleshooting**
137143

.ci/jenkins/lib/build-container-matrix.yaml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,6 @@ env:
3131
REGISTRY_REPO: "sw-nbu-swx-nixl-docker-local/verification"
3232
LOCAL_TAG_BASE: "nixl-ci:build-"
3333
MAIL_FROM: "[email protected]"
34-
NPROC: "16"
3534

3635
taskName: "${BUILD_TARGET}/${arch}/${axis_index}"
3736

@@ -89,6 +88,7 @@ steps:
8988
--base-image-tag "${BASE_IMAGE_TAG}" \
9089
--tag "${LOCAL_TAG_BASE}${arch}" \
9190
--arch "${arch}" \
91+
--build-type debug \
9292
--no-cache
9393
9494
- name: Add Version Info
@@ -175,7 +175,7 @@ steps:
175175
echo "benchmark/nixlbench/contrib/build.sh --base-image ${BASE_IMAGE} --base-image-tag ${BASE_IMAGE_TAG} --tag local-test-tag --arch ${arch} --no-cache --nixl \$WORKSPACE --ucx \$WORKSPACE/ucx-src"
176176
else
177177
echo "export UCX_REF=${UCX_VERSION}"
178-
echo "contrib/build-container.sh --base-image ${BASE_IMAGE} --base-image-tag ${BASE_IMAGE_TAG} --tag local-test-tag --arch ${arch} --no-cache"
178+
echo "contrib/build-container.sh --base-image ${BASE_IMAGE} --base-image-tag ${BASE_IMAGE_TAG} --tag local-test-tag --arch ${arch} --build-type debug --no-cache"
179179
fi
180180
181181
pipeline_stop:

.ci/jenkins/lib/build-matrix.yaml

Lines changed: 4 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@
66
# Key Components:
77
# - Job Configuration: Defines timeout, failure behavior, and Kubernetes resources
88
# - Docker Images: Specifies the container images used for different build stages
9-
# - PyTorch images (24.10 and 25.02) for building and testing
9+
# - cuda-dl-base images (25.06 for Ubuntu 24.04, 24.10 for Ubuntu 22.04) for building and testing
1010
# - Podman image for container builds
1111
# - Matrix Axes: Defines build variations (currently x86_64 architecture)
1212
# - Build Steps: Sequential steps for building, testing, and container creation
@@ -34,8 +34,8 @@ kubernetes:
3434
requests: "{memory: 8Gi, cpu: 8000m}"
3535

3636
runs_on_dockers:
37-
- { name: "ubuntu24.04-pytorch", url: "nvcr.io/nvidia/pytorch:25.02-py3" }
38-
- { name: "ubuntu22.04-pytorch", url: "nvcr.io/nvidia/pytorch:24.10-py3" }
37+
- { name: "ubuntu24.04-cuda-dl-base", url: "nvcr.io/nvidia/cuda-dl-base:25.06-cuda12.9-devel-ubuntu24.04" }
38+
- { name: "ubuntu22.04-cuda-dl-base", url: "nvcr.io/nvidia/cuda-dl-base:24.10-cuda12.6-devel-ubuntu22.04" }
3939
- { name: "podman-v5.0.2", url: "quay.io/podman/stable:v5.0.2", category: 'tool', privileged: true }
4040

4141
matrix:
@@ -47,17 +47,12 @@ matrix:
4747
env:
4848
NIXL_INSTALL_DIR: /opt/nixl
4949
TEST_TIMEOUT: 30
50-
NPROC: "16"
5150
UCX_TLS: "^shm"
5251

5352
steps:
5453
- name: Build
5554
parallel: false
5655
run: |
57-
if [[ "${name}" == *"ubuntu22.04"* ]]; then
58-
# distro's meson version is too old project requires >= 0.64.0
59-
pip3 install meson
60-
fi
6156
.gitlab/build.sh ${NIXL_INSTALL_DIR}
6257
6358
- name: Test CPP
@@ -94,4 +89,4 @@ steps:
9489
ln -sfT $(type -p podman) /usr/bin/docker
9590
# install git for building container image
9691
yum install -y git
97-
contrib/build-container.sh --no-cache
92+
contrib/build-container.sh --build-type debug --no-cache

.ci/jenkins/lib/test-matrix.yaml

Lines changed: 17 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -30,18 +30,22 @@ runs_on_agents:
3030
matrix:
3131
axes:
3232
image:
33-
- nvcr.io/nvidia/pytorch:25.02-py3
33+
- nvcr.io/nvidia/cuda-dl-base:25.06-cuda12.9-devel-ubuntu24.04
3434
arch:
3535
- x86_64
36+
ucx_version:
37+
- master
38+
- v1.20.x
3639

37-
taskName: "${name}/${arch}/${axis_index}"
40+
taskName: "${name}/${arch}/ucx-${ucx_version}/${axis_index}"
3841

3942
env:
40-
INSTALL_DIR: ${WORKSPACE}/nixl_install
41-
UCX_VERSION: v1.19.0
42-
NPROC: "16"
43+
CONTAINER_WORKSPACE: /workspace
44+
INSTALL_DIR: ${CONTAINER_WORKSPACE}/nixl_install
4345
# Manual timeout - ci-demo doesn't handle docker exec
4446
TEST_TIMEOUT: 30
47+
# NPROC for bare-metal: containers see all host CPUs, need to limit parallelism
48+
NPROC: 16
4549

4650
steps:
4751
- name: Get Environment Info
@@ -71,7 +75,7 @@ steps:
7175
- name: Build GPU Test Environment
7276
parallel: false
7377
run: |
74-
docker build -t "${JOB_BASE_NAME}-${BUILD_ID}-${axis_index}" -f .ci/dockerfiles/Dockerfile.gpu_test --build-arg BASE_IMAGE=${image} --build-arg WORKSPACE=${WORKSPACE} .
78+
docker build -t "${JOB_BASE_NAME}-${BUILD_ID}-${axis_index}" -f .ci/dockerfiles/Dockerfile.gpu_test --build-arg BASE_IMAGE=${image} --build-arg WORKSPACE=${CONTAINER_WORKSPACE} .
7579
onfail: docker image rm -f "${JOB_BASE_NAME}-${BUILD_ID}-${axis_index}"
7680

7781
- name: Run GPU Test Environment
@@ -93,39 +97,41 @@ steps:
9397
- name: Build
9498
parallel: false
9599
run: |
96-
docker exec -w ${WORKSPACE} -e UCX_VERSION=${UCX_VERSION} "${JOB_BASE_NAME}-${BUILD_ID}-${axis_index}" /bin/bash -c ".gitlab/build.sh ${INSTALL_DIR}"
100+
set -ex
101+
docker exec -w ${CONTAINER_WORKSPACE} "${JOB_BASE_NAME}-${BUILD_ID}-${axis_index}" /bin/bash -c "UCX_VERSION=${ucx_version} .gitlab/build.sh ${INSTALL_DIR}"
102+
97103
onfail: |
98104
docker rm -f "${JOB_BASE_NAME}-${BUILD_ID}-${axis_index}"
99105
docker image rm -f "${JOB_BASE_NAME}-${BUILD_ID}-${axis_index}"
100106
101107
- name: Test CPP
102108
parallel: false
103109
run: |
104-
timeout ${TEST_TIMEOUT}m docker exec -w ${WORKSPACE} "${JOB_BASE_NAME}-${BUILD_ID}-${axis_index}" /bin/bash -c ".gitlab/test_cpp.sh ${INSTALL_DIR}"
110+
timeout ${TEST_TIMEOUT}m docker exec -w ${CONTAINER_WORKSPACE} "${JOB_BASE_NAME}-${BUILD_ID}-${axis_index}" /bin/bash -c ".gitlab/test_cpp.sh ${INSTALL_DIR}"
105111
onfail: |
106112
docker rm -f "${JOB_BASE_NAME}-${BUILD_ID}-${axis_index}"
107113
docker image rm -f "${JOB_BASE_NAME}-${BUILD_ID}-${axis_index}"
108114
109115
- name: Test Python
110116
parallel: false
111117
run: |
112-
timeout ${TEST_TIMEOUT}m docker exec -w ${WORKSPACE} "${JOB_BASE_NAME}-${BUILD_ID}-${axis_index}" /bin/bash -c ".gitlab/test_python.sh ${INSTALL_DIR}"
118+
timeout ${TEST_TIMEOUT}m docker exec -w ${CONTAINER_WORKSPACE} "${JOB_BASE_NAME}-${BUILD_ID}-${axis_index}" /bin/bash -c ".gitlab/test_python.sh ${INSTALL_DIR}"
113119
onfail: |
114120
docker rm -f "${JOB_BASE_NAME}-${BUILD_ID}-${axis_index}"
115121
docker image rm -f "${JOB_BASE_NAME}-${BUILD_ID}-${axis_index}"
116122
117123
- name: Test Nixlbench
118124
parallel: false
119125
run: |
120-
timeout ${TEST_TIMEOUT}m docker exec -w ${WORKSPACE} "${JOB_BASE_NAME}-${BUILD_ID}-${axis_index}" /bin/bash -c ".gitlab/test_nixlbench.sh ${INSTALL_DIR}"
126+
timeout ${TEST_TIMEOUT}m docker exec -w ${CONTAINER_WORKSPACE} "${JOB_BASE_NAME}-${BUILD_ID}-${axis_index}" /bin/bash -c ".gitlab/test_nixlbench.sh ${INSTALL_DIR}"
121127
onfail: |
122128
docker rm -f "${JOB_BASE_NAME}-${BUILD_ID}-${axis_index}"
123129
docker image rm -f "${JOB_BASE_NAME}-${BUILD_ID}-${axis_index}"
124130
125131
- name: Test Rust
126132
parallel: false
127133
run: |
128-
timeout ${TEST_TIMEOUT}m docker exec -w ${WORKSPACE} "${JOB_BASE_NAME}-${BUILD_ID}-${axis_index}" /bin/bash -c ".gitlab/test_rust.sh ${INSTALL_DIR}"
134+
timeout ${TEST_TIMEOUT}m docker exec -w ${CONTAINER_WORKSPACE} "${JOB_BASE_NAME}-${BUILD_ID}-${axis_index}" /bin/bash -c ".gitlab/test_rust.sh ${INSTALL_DIR}"
129135
always: |
130136
docker rm -f "${JOB_BASE_NAME}-${BUILD_ID}-${axis_index}"
131137
docker image rm -f "${JOB_BASE_NAME}-${BUILD_ID}-${axis_index}"

.ci/jenkins/pipeline/proj-jjb.yaml

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -269,18 +269,18 @@
269269
- string:
270270
name: "NIXL_VERSION"
271271
default: "{jjb_branch}"
272-
description: "NIXL version to use (tag like 0.6.1, branch name, or commit hash)"
272+
description: "NIXL version to use (tag like 0.7.1, branch name, or commit hash)"
273273
- string:
274274
name: "UCX_VERSION"
275-
default: "v1.19.0"
276-
description: "UCX version to use (tag like v1.19.0, branch name, or commit hash)"
275+
default: "v1.20.x"
276+
description: "UCX version to use (tag like v1.20.x, branch name, or commit hash)"
277277
- string:
278278
name: "BASE_IMAGE"
279279
default: "nvcr.io/nvidia/cuda-dl-base"
280280
description: "Base Docker image for the container build"
281281
- string:
282282
name: "BASE_IMAGE_TAG"
283-
default: "25.03-cuda12.8-devel-ubuntu24.04"
283+
default: "25.06-cuda12.9-devel-ubuntu24.04"
284284
description: "Tag for the base Docker image"
285285
- string:
286286
name: "TAG_SUFFIX"
@@ -294,7 +294,7 @@
294294
description: >
295295
Update the latest tag for this architecture.<br/>
296296
When enabled, also creates: <code>&lt;base-image-tag&gt;-&lt;arch&gt;-latest</code><br/>
297-
Example: <code>25.03-cuda12.8-devel-ubuntu24.04-aarch64-latest</code><br/>
297+
Example: <code>25.06-cuda12.9-devel-ubuntu24.04-aarch64-latest</code><br/>
298298
- string:
299299
name: "MAIL_TO"
300300
default: "[email protected]"

.ci/scripts/common.sh

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -78,6 +78,11 @@ max_gtest_port=$((tcp_port_max + gtest_offset))
7878
# Check if a GPU is present
7979
nvidia-smi -L | grep -q '^GPU' && HAS_GPU=true || HAS_GPU=false
8080

81+
# Ensure CUDA_HOME is set if CUDA is installed (cuda-dl-base images don't set it by default)
82+
if [ -d "/usr/local/cuda" ] && [ -z "$CUDA_HOME" ]; then
83+
export CUDA_HOME=/usr/local/cuda
84+
fi
85+
8186
if $HAS_GPU && test -d "$CUDA_HOME"
8287
then
8388
UCX_CUDA_BUILD_ARGS="--with-cuda=${CUDA_HOME}"
@@ -89,3 +94,24 @@ fi
8994

9095
# Default to false, unless TEST_LIBFABRIC is set. AWS EFA tests must set it to true.
9196
export TEST_LIBFABRIC=${TEST_LIBFABRIC:-false}
97+
98+
# Set default parallelism for make/ninja (can be overridden by NPROC env var)
99+
if [ -z "$NPROC" ]; then
100+
# In containers, calculate based on memory limits to avoid OOM
101+
if [[ -f /.dockerenv || -f /run/.containerenv || -n "${KUBERNETES_SERVICE_HOST}" ]]; then
102+
if [ -f /sys/fs/cgroup/memory/memory.limit_in_bytes ]; then
103+
limit=$(cat /sys/fs/cgroup/memory/memory.limit_in_bytes)
104+
elif [ -f /sys/fs/cgroup/memory.max ]; then
105+
limit=$(cat /sys/fs/cgroup/memory.max)
106+
else
107+
limit=$((4 * 1024 * 1024 * 1024))
108+
fi
109+
# Use 1 process per GB of memory, max 16
110+
nproc=$((limit / (1024 * 1024 * 1024)))
111+
nproc=$((nproc > 16 ? 16 : nproc))
112+
nproc=$((nproc < 1 ? 1 : nproc))
113+
else
114+
nproc=$(nproc --all)
115+
fi
116+
export NPROC=$nproc
117+
fi

.gitlab/build.sh

Lines changed: 27 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -27,9 +27,9 @@ INSTALL_DIR=$1
2727
UCX_INSTALL_DIR=$2
2828
EXTRA_BUILD_ARGS=${3:-""}
2929
# UCX_VERSION is the version of UCX to build override default with env variable.
30-
UCX_VERSION=${UCX_VERSION:-v1.19.0}
30+
UCX_VERSION=${UCX_VERSION:-v1.20.x}
3131
# LIBFABRIC_VERSION is the version of libfabric to build override default with env variable.
32-
LIBFABRIC_VERSION=${LIBFABRIC_VERSION:-v2.3.0}
32+
LIBFABRIC_VERSION=${LIBFABRIC_VERSION:-v1.21.0}
3333
# LIBFABRIC_INSTALL_DIR can be set via environment variable, defaults to INSTALL_DIR
3434
LIBFABRIC_INSTALL_DIR=${LIBFABRIC_INSTALL_DIR:-$INSTALL_DIR}
3535

@@ -57,7 +57,9 @@ ARCH=$(uname -m)
5757
$SUDO rm -rf /usr/lib/cmake/grpc /usr/lib/cmake/protobuf
5858

5959
$SUDO apt-get -qq update
60-
$SUDO apt-get -qq install -y curl \
60+
$SUDO apt-get -qq install -y python3-dev \
61+
python3-pip \
62+
curl \
6163
wget \
6264
libnuma-dev \
6365
numactl \
@@ -101,6 +103,17 @@ $SUDO apt-get -qq install -y curl \
101103
libhwloc-dev \
102104
libcurl4-openssl-dev zlib1g-dev # aws-sdk-cpp dependencies
103105

106+
# Ubuntu 22.04 specific setup
107+
if grep -q "Ubuntu 22.04" /etc/os-release 2>/dev/null; then
108+
# Upgrade pip for '--break-system-packages' support
109+
$SUDO pip3 install --upgrade pip
110+
111+
# Upgrade meson (distro version 0.61.2 is too old, project requires >= 0.64.0)
112+
$SUDO pip3 install --upgrade meson
113+
# Ensure pip3's meson takes precedence over apt's version
114+
export PATH="$HOME/.local/bin:/usr/local/bin:$PATH"
115+
fi
116+
104117
# Add DOCA repository and install packages
105118
ARCH_SUFFIX=$(if [ "${ARCH}" = "aarch64" ]; then echo "arm64"; else echo "amd64"; fi)
106119
MELLANOX_OS="$(. /etc/lsb-release; echo ${DISTRIB_ID}${DISTRIB_RELEASE} | tr A-Z a-z | tr -d .)"
@@ -122,6 +135,11 @@ chmod +x rustup-init
122135
./rustup-init -y --default-toolchain 1.86.0
123136
export PATH="$HOME/.cargo/bin:$PATH"
124137

138+
wget --tries=3 --waitretry=5 "https://astral.sh/uv/install.sh" -O install_uv.sh
139+
chmod +x install_uv.sh
140+
./install_uv.sh
141+
export PATH="$HOME/.local/bin:$PATH"
142+
125143
curl -fSsL "https://github.com/openucx/ucx/tarball/${UCX_VERSION}" | tar xz
126144
( \
127145
cd openucx-ucx* && \
@@ -167,7 +185,7 @@ rm "libfabric-${LIBFABRIC_VERSION#v}.tar.bz2"
167185
cd etcd-cpp-apiv3 && \
168186
mkdir build && cd build && \
169187
cmake .. && \
170-
make -j"${NPROC:-$(nproc)}" && \
188+
make -j"$NPROC" && \
171189
$SUDO make install && \
172190
$SUDO ldconfig \
173191
)
@@ -178,7 +196,7 @@ rm "libfabric-${LIBFABRIC_VERSION#v}.tar.bz2"
178196
mkdir aws_sdk_build && \
179197
cd aws_sdk_build && \
180198
cmake ../aws-sdk-cpp/ -DCMAKE_BUILD_TYPE=Release -DBUILD_ONLY="s3" -DENABLE_TESTING=OFF -DCMAKE_INSTALL_PREFIX=/usr/local && \
181-
make -j"${NPROC:-$(nproc)}" && \
199+
make -j"$NPROC" && \
182200
$SUDO make install
183201
)
184202

@@ -209,12 +227,13 @@ export CMAKE_PREFIX_PATH="${INSTALL_DIR}:${CMAKE_PREFIX_PATH}"
209227
export UCX_TLS=^cuda_ipc
210228

211229
# shellcheck disable=SC2086
212-
meson setup nixl_build --prefix=${INSTALL_DIR} -Ducx_path=${UCX_INSTALL_DIR} -Dbuild_docs=true -Drust=false ${EXTRA_BUILD_ARGS} -Dlibfabric_path="${LIBFABRIC_INSTALL_DIR}"
213-
ninja -C nixl_build && ninja -C nixl_build install
230+
meson setup nixl_build --prefix=${INSTALL_DIR} -Ducx_path=${UCX_INSTALL_DIR} -Dbuild_docs=true -Drust=false ${EXTRA_BUILD_ARGS} -Dlibfabric_path="${LIBFABRIC_INSTALL_DIR}" --buildtype=debug
231+
ninja -j"$NPROC" -C nixl_build && ninja -j"$NPROC" -C nixl_build install
232+
mkdir -p dist && cp nixl_build/src/bindings/python/nixl-meta/nixl-*.whl dist/
214233

215234
# TODO(kapila): Copy the nixl.pc file to the install directory if needed.
216235
# cp ${BUILD_DIR}/nixl.pc ${INSTALL_DIR}/lib/pkgconfig/nixl.pc
217236

218237
cd benchmark/nixlbench
219238
meson setup nixlbench_build -Dnixl_path=${INSTALL_DIR} -Dprefix=${INSTALL_DIR}
220-
ninja -C nixlbench_build && ninja -C nixlbench_build install
239+
ninja -j"$NPROC" -C nixlbench_build && ninja -j"$NPROC" -C nixlbench_build install

This commit has 0 comments.

Comments (0)