Skip to content

Commit 926bbf3

Browse files
committed
Add XPU to binary build generation
1 parent 52eb4b5 commit 926bbf3

File tree

3 files changed

+33
-1
lines changed

3 files changed

+33
-1
lines changed

.github/workflows/build_wheels_linux.yml

+5
Original file line numberDiff line numberDiff line change
@@ -171,6 +171,11 @@ jobs:
171171
run: |
172172
set -euxo pipefail
173173
cat "${{ inputs.env-var-script }}" >> "${BUILD_ENV_FILE}"
174+
- name: Add XPU Env Vars in Build Env File
175+
if: ${{ matrix.gpu_arch_type == 'xpu' }}
176+
run: |
177+
echo "set +u" >> "${BUILD_ENV_FILE}"
178+
echo "source /opt/intel/oneapi/pytorch-gpu-dev-0.5/oneapi-vars.sh" >> "${BUILD_ENV_FILE}"
174179
- name: Install torch dependency
175180
run: |
176181
set -euxo pipefail

.github/workflows/generate_binary_build_matrix.yml

+5
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,10 @@ on:
3535
description: "Build with CPU?"
3636
default: "enable"
3737
type: string
38+
with-xpu:
39+
description: "Build with XPU?"
40+
default: "enable"
41+
type: string
3842
use-only-dl-pytorch-org:
3943
description: "Use only download.pytorch.org when generating wheel install command?"
4044
default: "false"
@@ -80,6 +84,7 @@ jobs:
8084
WITH_CUDA: ${{ inputs.with-cuda }}
8185
WITH_ROCM: ${{ inputs.with-rocm }}
8286
WITH_CPU: ${{ inputs.with-cpu }}
87+
WITH_XPU: ${{ inputs.with-xpu }}
8388
# limit pull request builds to one version of python unless ciflow/binaries/all is applied to the workflow
8489
# should not affect builds that are from events that are not the pull_request event
8590
LIMIT_PR_BUILDS: ${{ github.event_name == 'pull_request' && !contains( github.event.pull_request.labels.*.name, 'ciflow/binaries/all') }}

tools/scripts/generate_binary_build_matrix.py

+23-1
Original file line numberDiff line numberDiff line change
@@ -61,6 +61,7 @@
6161
CUDA_AARCH64 = "cuda-aarch64"
6262
CUDA = "cuda"
6363
ROCM = "rocm"
64+
XPU = "xpu"
6465

6566

6667
CURRENT_NIGHTLY_VERSION = "2.5.0"
@@ -107,6 +108,8 @@ def arch_type(arch_version: str) -> str:
107108
return CPU_AARCH64
108109
elif arch_version == CUDA_AARCH64:
109110
return CUDA_AARCH64
111+
elif arch_version == XPU:
112+
return XPU
110113
else: # arch_version should always be CPU in this case
111114
return CPU
112115

@@ -160,6 +163,7 @@ def initialize_globals(channel: str, build_python_only: bool) -> None:
160163
for gpu_arch in ROCM_ARCHES
161164
},
162165
CPU: "pytorch/manylinux-builder:cpu",
166+
XPU: "pytorch/manylinux2_28-builder:xpu",
163167
CPU_AARCH64: "pytorch/manylinuxaarch64-builder:cpu-aarch64",
164168
CUDA_AARCH64: "pytorch/manylinuxaarch64-builder:cuda12.4",
165169
}
@@ -199,6 +203,7 @@ def translate_desired_cuda(gpu_arch_type: str, gpu_arch_version: str) -> str:
199203
CUDA_AARCH64: "cu124",
200204
CUDA: f"cu{gpu_arch_version.replace('.', '')}",
201205
ROCM: f"rocm{gpu_arch_version}",
206+
XPU: "xpu",
202207
}.get(gpu_arch_type, gpu_arch_version)
203208

204209

@@ -327,6 +332,7 @@ def generate_conda_matrix(
327332
with_cuda: str,
328333
with_rocm: str,
329334
with_cpu: str,
335+
with_xpu: str,
330336
limit_pr_builds: bool,
331337
use_only_dl_pytorch_org: bool,
332338
use_split_build: bool = False,
@@ -383,6 +389,7 @@ def generate_libtorch_matrix(
383389
with_cuda: str,
384390
with_rocm: str,
385391
with_cpu: str,
392+
with_xpu: str,
386393
limit_pr_builds: bool,
387394
use_only_dl_pytorch_org: bool,
388395
use_split_build: bool = False,
@@ -472,6 +479,7 @@ def generate_wheels_matrix(
472479
with_cuda: str,
473480
with_rocm: str,
474481
with_cpu: str,
482+
with_xpu: str,
475483
limit_pr_builds: bool,
476484
use_only_dl_pytorch_org: bool,
477485
use_split_build: bool = False,
@@ -510,6 +518,10 @@ def generate_wheels_matrix(
510518
if os == LINUX:
511519
arches += ROCM_ARCHES
512520

521+
if with_xpu == ENABLE:
522+
if os == LINUX:
523+
arches += [XPU]
524+
513525
if limit_pr_builds:
514526
python_versions = [python_versions[0]]
515527

@@ -523,7 +535,7 @@ def generate_wheels_matrix(
523535
continue
524536
gpu_arch_version = (
525537
""
526-
if arch_version in [CPU, CPU_AARCH64]
538+
if arch_version in [CPU, CPU_AARCH64, XPU]
527539
else arch_version
528540
)
529541

@@ -579,6 +591,7 @@ def generate_build_matrix(
579591
with_cuda: str,
580592
with_rocm: str,
581593
with_cpu: str,
594+
with_xpu: str,
582595
limit_pr_builds: str,
583596
use_only_dl_pytorch_org: str,
584597
build_python_only: str,
@@ -602,6 +615,7 @@ def generate_build_matrix(
602615
with_cuda,
603616
with_rocm,
604617
with_cpu,
618+
with_xpu,
605619
limit_pr_builds == "true",
606620
use_only_dl_pytorch_org == "true",
607621
use_split_build == "true",
@@ -653,6 +667,13 @@ def main(args: List[str]) -> None:
653667
choices=[ENABLE, DISABLE],
654668
default=os.getenv("WITH_CPU", ENABLE),
655669
)
670+
parser.add_argument(
671+
"--with-xpu",
672+
help="Build with XPU?",
673+
type=str,
674+
choices=[ENABLE, DISABLE],
675+
default=os.getenv("WITH_XPU", ENABLE),
676+
)
656677
# By default this is false for this script but expectation is that the caller
657678
# workflow will default this to be true most of the time, where a pull
658679
# request is synchronized and does not contain the label "ciflow/binaries/all"
@@ -705,6 +726,7 @@ def main(args: List[str]) -> None:
705726
options.with_cuda,
706727
options.with_rocm,
707728
options.with_cpu,
729+
options.with_xpu,
708730
options.limit_pr_builds,
709731
options.use_only_dl_pytorch_org,
710732
options.build_python_only,

0 commit comments

Comments (0)