diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml deleted file mode 100644 index 11a14256801a..000000000000 --- a/.pre-commit-config.yaml +++ /dev/null @@ -1,89 +0,0 @@ -repos: -- repo: meta - hooks: - - id: check-hooks-apply - - id: check-useless-excludes - -- repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.0.1 - hooks: - - id: check-case-conflict - - id: check-json - - id: check-symlinks - - id: check-yaml - - id: destroyed-symlinks - - id: end-of-file-fixer - exclude: docs/CNAME - - id: fix-byte-order-marker - - id: fix-encoding-pragma - args: [--remove] - - id: mixed-line-ending - args: [--fix=lf] - - id: requirements-txt-fixer - - id: trailing-whitespace - -- repo: https://github.com/google/yapf - rev: v0.32.0 - hooks: - - id: yapf - -- repo: https://gitlab.com/daverona/pre-commit/cpp - rev: 0.8.0 - hooks: - - id: clang-format # formatter of C/C++ code based on a style guide: LLVM, Google, Chromium, Mozilla, and WebKit available - args: [] - -- repo: local - hooks: - - id: check-torchdist - name: check-torchdist - entry: ./scripts/check-torchdist.py - language: python - exclude: ^(deepspeed/comm/|docs/|benchmarks/|scripts/check-torchdist.py|deepspeed/moe/sharded_moe.py|deepspeed/runtime/comm/coalesced_collectives.py|deepspeed/elasticity/elastic_agent.py|deepspeed/launcher/launch.py|tests/unit/comm/test_dist.py) - # Specific deepspeed/ files are excluded for now until we wrap ProcessGroup in deepspeed.comm - -- repo: local - hooks: - - id: check-license - name: check-license - entry: ./scripts/check-license.py - language: python - files: \.(py|c|cpp|cu|cc|h|hpp|cuh|hip|tr)$ - exclude: ^(deepspeed/inference/v2/kernels/ragged_ops/blocked_flash|deepspeed/inference/v2/kernels/cutlass_ops/grouped_gemm) - -- repo: https://github.com/codespell-project/codespell - rev: v2.1.0 - hooks: - - id: codespell - args: [ - # Do not check files that are automatically generated - '--skip=docs/Gemfile.lock,tests/unit/gpt2-merges.txt,tests/unit/gpt2-vocab.json', - '--ignore-regex=\\n', # Do not count the 'n' in an escaped newline as part of a word - '--ignore-words-list=youn,unsupport,noe', # Word used in error messages that need rewording - --check-filenames, - --check-hidden - ] - -- repo: https://github.com/pycqa/flake8 - rev: 4.0.1 - hooks: - - id: flake8 - args: ['--config=.flake8'] - -- repo: local - hooks: - - id: check-torchcuda - name: check-torchcuda - entry: ./scripts/check-torchcuda.py - language: python - exclude: ^(.github/workflows/|scripts/check-torchcuda.py|docs/_tutorials/accelerator-abstraction-interface.md|accelerator/cuda_accelerator.py|deepspeed/inference/engine.py|deepspeed/model_implementations/transformers/clip_encoder.py|deepspeed/model_implementations/diffusers/vae.py|deepspeed/model_implementations/diffusers/unet.py|op_builder/spatial_inference.py|op_builder/transformer_inference.py|op_builder/builder.py|setup.py|tests/unit/ops/sparse_attention/test_sparse_attention.py) - # Specific deepspeed/ files are excluded for now until we wrap ProcessGroup in deepspeed.comm - -- repo: local - hooks: - - id: check-extraindexurl - name: check-extraindexurl - entry: ./scripts/check-extraindexurl.py - language: python - files: \.(yml|yaml|sh|py)$ - exclude: ^(scripts/check-extraindexurl.py) diff --git a/CODEOWNERS b/CODEOWNERS index 2c16aef39a1b..ab0f37d26ff0 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -5,52 +5,6 @@ # Learn more about CODEOWNERS syntax here: # 
https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners +* nelyahu@habana.ai lbachar@habana.ai oelayan@habana.ai mkovalenko@habana.ai nsonnenschein@habana.ai snahir@habana.ai mmalekan@habana.ai -# top-level repo folders -/.github/ @mrwyattii @loadams -/azure/ @mrwyattii @awan-10 -/benchmarks/ @awan-10 @mrwyattii -/bin/ @mrwyattii -/csrc/ @awan-10 @mrwyattii @cmikeh2 @arashb -/deepspeed/ @mrwyattii -/docker/ @mrwyattii @awan-10 -/docs/ @mrwyattii -/examples/ @awan-10 @mrwyattii -/op_builder/ @mrwyattii @cmikeh2 -/release/ @loadams @mrwyattii -/requirements/ @loadams @mrwyattii -/scripts/ @mrwyattii @awan-10 -/tests/ @mrwyattii @tjruwase @loadams - -# deepspeed -/deepspeed/autotuning/ @mrwyattii -/deepspeed/checkpoint/ @tjruwase -/deepspeed/comm/ @awan-10 -/deepspeed/compression/ @minjiaz @xiaoxiawu-microsoft @conglongli -/deepspeed/elasticity/ @mrwyattii @awan-10 -/deepspeed/launcher/ @mrwyattii @awan-10 -/deepspeed/module_inject/ @mrwyattii @awan-10 @cmikeh2 @arashb -/deepspeed/moe/ @awan-10 -/deepspeed/monitor/ @awan-10 @mrwyattii -/deepspeed/nebula/ @tjruwase @mrwyattii -/deepspeed/ops/ @mrwyattii @awan-10 @cmikeh2 @arashb -/deepspeed/pipe/ @ShadenSmith @duli2012 -/deepspeed/profiling/ @ShijieZZZZ -/deepspeed/utils/ @mrwyattii @tjruwase @awan-10 - -# inference -/deepspeed/inference/ @mrwyattii @awan-10 @cmikeh2 @arashb -/deepspeed/model_implementations/ @mrwyattii @awan-10 @cmikeh2 @arashb - -# training -/deepspeed/runtime/ @mrwyattii @tjruwase -/deepspeed/runtime/activation_checkpointing/ @mrwyattii @tjruwase -/deepspeed/runtime/checkpoint_engine/ @tjruwase @mrwyattii -/deepspeed/runtime/comm/ @awan-10 -/deepspeed/runtime/compression/ @awan-10 @conglongli -/deepspeed/runtime/data_pipeline/ @conglongli -/deepspeed/runtime/fp16/ @mrwyattii @tjruwase -/deepspeed/runtime/fp16/onebit/ @conglongli @awan-10 -/deepspeed/runtime/pipe/ @ShadenSmith @duli2012 -/deepspeed/runtime/swap_tensor/ @tjruwase @mrwyattii -/deepspeed/runtime/zero/ @tjruwase @mrwyattii +CODEOWNERS nelyahu@habana.ai oabramovich@habana.ai geilat@habana.ai diff --git a/accelerator/hpu_accelerator.py b/accelerator/hpu_accelerator.py index dd87461696cf..2974f73baa64 100644 --- a/accelerator/hpu_accelerator.py +++ b/accelerator/hpu_accelerator.py @@ -3,6 +3,7 @@ # DeepSpeed Team +import functools import os import pkgutil import importlib @@ -17,6 +18,7 @@ def __init__(self): self._name = 'hpu' self._communication_backend_name = 'hccl' self._compile_backend = "hpu_backend" + self.apply_hpu_workarounds() try: import habana_frameworks.torch.hpu as hpu hpu.setDeterministic(True) @@ -27,6 +29,15 @@ def __init__(self): self.fp16_supported = None + def apply_hpu_workarounds(self): + + def update_wa_env_var(key, value): + if key not in os.environ.keys(): + os.environ[key] = value + + update_wa_env_var("PT_HPU_LAZY_ACC_PAR_MODE", "0") + update_wa_env_var("PT_HPU_ENABLE_REFINE_DYNAMIC_SHAPES", "0") + # Device APIs def is_synchronized_device(self): return False @@ -41,9 +52,8 @@ def handles_memory_backpressure(self): return True def device_name(self, device_index=None): - if device_index is None: - return 'hpu' - return 'hpu:{}'.format(device_index) + # ignoring device_index. 
+ return 'hpu' def device(self, device_index=None): return torch.device(self.device_name(device_index)) @@ -196,31 +206,31 @@ def replay_graph(self, graph): # Tensor operations @property def BFloat16Tensor(self): - return self.hpu.BFloat16Tensor + return functools.partial(torch.tensor, dtype=torch.bfloat16, device='hpu') @property def ByteTensor(self): - return self.hpu.ByteTensor + return functools.partial(torch.tensor, dtype=torch.uint8, device='hpu') @property def DoubleTensor(self): - return self.hpu.DoubleTensor + return functools.partial(torch.tensor, dtype=torch.double, device='hpu') @property def FloatTensor(self): - return self.hpu.FloatTensor + return functools.partial(torch.tensor, dtype=torch.float, device='hpu') @property def HalfTensor(self): - return self.hpu.HalfTensor + return functools.partial(torch.tensor, dtype=torch.half, device='hpu') @property def IntTensor(self): - return self.hpu.IntTensor + return functools.partial(torch.tensor, dtype=torch.int, device='hpu') @property def LongTensor(self): - return self.hpu.LongTensor + return functools.partial(torch.tensor, dtype=torch.long, device='hpu') def pin_memory(self, tensor, align_bytes=1): return tensor.pin_memory(self.device()) @@ -289,6 +299,14 @@ def get_op_builder(self, class_name): else: return self.class_dict['NotImplementedBuilder'] if 'NotImplementedBuilder' in self.class_dict else None + #shall be removed once moving to torch.compile + def wrap_in_hpu_graph(self, module): + if self.hpu.is_lazy(): + module = self.hpu.wrap_in_hpu_graph(module) + else: + print("Warning: hpu graphs in eager mode is not supported, ignoring") + return module + def build_extension(self): from torch.utils.cpp_extension import BuildExtension return BuildExtension @@ -297,7 +315,12 @@ def export_envs(self): return [] def visible_devices_envs(self): - return ['HABANA_VISIBLE_MODULES'] + # TODO SW-195658: remove WA to not return HABANA_VISIBLE_MODULES once SW-195657 is resolved + # Current way deepspeed set this env var is not applicable with all HPU instances + # User has to follow instructions in: + # https://docs.habana.ai/en/latest/PyTorch/Reference/PT_Multiple_Tenants_on_HPU/Multiple_Workloads_Single_Docker.html + # keeping CUDA_VISIBLE_DEVICES + return ['CUDA_VISIBLE_DEVICES'] #['HABANA_VISIBLE_MODULES'] def set_visible_devices_envs(self, current_env, local_accelerator_ids): for env in self.visible_devices_envs(): diff --git a/build.txt b/build.txt new file mode 100644 index 000000000000..9f79cc175f57 --- /dev/null +++ b/build.txt @@ -0,0 +1 @@ ++hpu.synapse.v1.19.0 diff --git a/csrc/fp_quantizer/fp_quantize.cpp b/csrc/fp_quantizer/fp_quantize.cpp index 6962b8050f51..1a887b50e1a3 100644 --- a/csrc/fp_quantizer/fp_quantize.cpp +++ b/csrc/fp_quantizer/fp_quantize.cpp @@ -22,25 +22,20 @@ stochastic_rounding); \ } -at::Tensor quantize(torch::Tensor& val, +at::Tensor quantize(torch::Tensor& out, + torch::Tensor& val, int group_size, int stochastic_rounding, int q_bits, int q_mantisa_bits) { int total_elems = at::numel(val); - auto options = at::TensorOptions() - .dtype(torch::kInt8) - .layout(val.layout()) - .device(val.device()) - .requires_grad(false); float q_range = q_bits == 8 ? (q_mantisa_bits == 3 ? 480.0 : 114688.0) : // fp8 ranges (q_bits == 12 ? 510.0 : // fp12 range (q_bits == 6 ? 28.0 : // fp6 range 6.0)); // fp4 range (using power 2); TODO (Reza): add the power-4 // in case accuracy is not matching! 
int num_groups = total_elems / group_size; - auto out = torch::empty({num_groups, group_size * q_bits / 8 + 4}, options); DISPATCH_QUANTIZE(kHalf, __half, 23, 8); #ifdef BF16_AVAILABLE @@ -108,9 +103,22 @@ void selective_dequantize(torch::Tensor& val, #endif } +at::Tensor get_scales(torch::Tensor& out, int num_groups) +{ + auto options = at::TensorOptions() + .dtype(torch::kFloat) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + auto scales = + torch::from_blob(out.data_ptr(), {num_groups, 1}, {out.stride(0) / 4, 1}, options); + return scales; +} + PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("quantize", &quantize, "quantize function"); m.def("dequantize", &dequantize, "dequantize function"); + m.def("get_scales", &get_scales, "get scales function"); m.def("selective_dequantize", &selective_dequantize, "selective dequantize function"); } diff --git a/csrc/fp_quantizer/fp_quantize.cu b/csrc/fp_quantizer/fp_quantize.cu index cca063956167..66ea7392e011 100644 --- a/csrc/fp_quantizer/fp_quantize.cu +++ b/csrc/fp_quantizer/fp_quantize.cu @@ -15,7 +15,9 @@ #include #include +#ifdef BF16_AVAILABLE #include +#endif #include using ROp = reduce::ROpType; diff --git a/csrc/fp_quantizer/includes/fp_quantize.h b/csrc/fp_quantizer/includes/fp_quantize.h index 507252d6e722..60c75541f603 100644 --- a/csrc/fp_quantizer/includes/fp_quantize.h +++ b/csrc/fp_quantizer/includes/fp_quantize.h @@ -10,7 +10,9 @@ #include +#ifdef BF16_AVAILABLE #include +#endif #include #include diff --git a/csrc/transformer/inference/csrc/pt_binding.cpp b/csrc/transformer/inference/csrc/pt_binding.cpp index 1b9f91cd9c88..db9ce43ec163 100644 --- a/csrc/transformer/inference/csrc/pt_binding.cpp +++ b/csrc/transformer/inference/csrc/pt_binding.cpp @@ -452,14 +452,17 @@ std::vector ds_softmax_context(at::Tensor& query_key_value, unsigned layer_id, unsigned num_layers, at::Tensor& alibi, - float rope_theta) + float rope_theta, + bool is_prompt, + std::optional token_idx, + std::optional position_ids) { unsigned bsz = query_key_value.size(0); unsigned seq_len = query_key_value.size(1); int k = query_key_value.size(2) / (heads + 2 * (num_kv > 0 ? 
num_kv : heads)); unsigned hidden_dim = heads * k; - bool is_prompt = (seq_len > 1); + is_prompt = (seq_len > 1); if (is_prompt) InferenceContext::Instance().reset_tokens(seq_len); unsigned soft_len = InferenceContext::Instance().current_tokens(); @@ -2028,7 +2031,7 @@ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) "DeepSpeed memory allocation for GPT inference with " #_name " (CUDA)"); \ m.def("dequantize_" #_name, \ &ds_dequantize<_dtype>, \ - "DeepSpeed dequantize with " #_name " (CUDA)") + "DeepSpeed dequantize with " #_name " (CUDA)"); DEF_OPS(fp32, float); DEF_OPS(fp16, __half); diff --git a/deepspeed/checkpoint/zero_checkpoint.py b/deepspeed/checkpoint/zero_checkpoint.py index 6730b93dfd4f..c5d4102859fb 100644 --- a/deepspeed/checkpoint/zero_checkpoint.py +++ b/deepspeed/checkpoint/zero_checkpoint.py @@ -105,7 +105,7 @@ def _strip_tensor_paddings(self, sd): if group_paddings[key] == 0: continue for state_name, state_value in group_state.items(): - if state_name != "step" and torch.is_tensor(state_value): + if state_name != "step" and torch.is_tensor(state_value) and state_value.dim(): raw_length = state_value.numel() - group_paddings[key] group_state[state_name] = torch.narrow(state_value, 0, 0, raw_length).clone() else: diff --git a/deepspeed/comm/torch.py b/deepspeed/comm/torch.py index 14d4f3847315..76ef925e606b 100644 --- a/deepspeed/comm/torch.py +++ b/deepspeed/comm/torch.py @@ -20,6 +20,12 @@ DS_COMM_REDUCE_OFF = False +def disable_compiler_collective(func): + if required_torch_version(min_version=2.3): + return func + return compiler.disable(func) + + def build_shm_op(): builder = get_accelerator().create_op_builder("ShareMemCommBuilder") if builder is None or not deepspeed.ops.__compatible_ops__[builder.NAME]: @@ -114,7 +120,7 @@ def __init__(self, backend, timeout, init_method, rank=-1, world_size=-1, name=' self.shm_comm_op.initialize(self.get_world_size(), self.get_rank()) @classmethod - @compiler.disable + @disable_compiler_collective def get_all_gather_function(self): if hasattr(torch.distributed, "all_gather_into_tensor"): return torch.distributed.all_gather_into_tensor @@ -123,7 +129,7 @@ def get_all_gather_function(self): return None @classmethod - @compiler.disable + @disable_compiler_collective def get_reduce_scatter_function(self): if hasattr(torch.distributed, "reduce_scatter_tensor"): return torch.distributed.reduce_scatter_tensor @@ -146,18 +152,17 @@ def init_process_group(self, backend, timeout, init_method, rank, world_size): world_size=world_size) self.using_mpi = torch.distributed.get_backend() == 'mpi' - @compiler.disable + @disable_compiler_collective def all_reduce(self, tensor, op=torch.distributed.ReduceOp.SUM, group=None, async_op=False): op = self._reduce_op(op) return torch.distributed.all_reduce(tensor=tensor, op=op, group=group, async_op=async_op) - @compiler.disable def inference_all_reduce(self, tensor, op, group=None): if self.shm_comm_op == None or self.shm_comm_op.inference_all_reduce(tensor, op) == -1: op = self._reduce_op(op) return torch.distributed.all_reduce(tensor=tensor, op=op, group=group, async_op=False) - @compiler.disable + @disable_compiler_collective def all_reduce_coalesced(self, tensors, op=torch.distributed.ReduceOp.SUM, group=None, async_op=False): """ proxy func to torch.distributed.all_reduce_coalesced, which is included in PyTorch 1.13 and above @@ -168,7 +173,7 @@ def all_reduce_coalesced(self, tensors, op=torch.distributed.ReduceOp.SUM, group op = self._reduce_op(op) return 
torch.distributed.all_reduce_coalesced(tensors=tensors, op=op, group=group, async_op=async_op) - @compiler.disable + @disable_compiler_collective def reduce(self, tensor, dst, op=ReduceOp.SUM, group=None, async_op=False): if DS_COMM_REDUCE_OFF: if int(os.getenv('RANK', '0')) == 0: @@ -176,7 +181,7 @@ def reduce(self, tensor, dst, op=ReduceOp.SUM, group=None, async_op=False): return Noop() return torch.distributed.reduce(tensor=tensor, dst=dst, op=self._reduce_op(op), group=group, async_op=async_op) - @compiler.disable + @disable_compiler_collective def reduce_scatter(self, output, input_list, op=ReduceOp.SUM, group=None, async_op=False): if DS_COMM_REDUCE_SCATTER_OFF: if int(os.getenv('RANK', '0')) == 0: @@ -189,7 +194,7 @@ def reduce_scatter(self, output, input_list, op=ReduceOp.SUM, group=None, async_ group=group, async_op=async_op) - @compiler.disable + @disable_compiler_collective def broadcast(self, tensor, src, group=None, async_op=False): if DS_COMM_BROADCAST_OFF: if int(os.getenv('RANK', '0')) == 0: @@ -198,7 +203,7 @@ def broadcast(self, tensor, src, group=None, async_op=False): else: return torch.distributed.broadcast(tensor=tensor, src=src, group=group, async_op=async_op) - @compiler.disable + @disable_compiler_collective def all_gather(self, tensor_list, tensor, group=None, async_op=False): if DS_COMM_ALL_GATHER_OFF: if int(os.getenv('RANK', '0')) == 0: @@ -207,7 +212,7 @@ def all_gather(self, tensor_list, tensor, group=None, async_op=False): else: return torch.distributed.all_gather(tensor_list=tensor_list, tensor=tensor, group=group, async_op=async_op) - @compiler.disable + @disable_compiler_collective def all_gather_into_tensor(self, output_tensor, input_tensor, group=None, async_op=False): if self.has_all_gather_into_tensor(): return self.all_gather_function(output_tensor=output_tensor, @@ -215,7 +220,7 @@ def all_gather_into_tensor(self, output_tensor, input_tensor, group=None, async_ group=group, async_op=async_op) - @compiler.disable + @disable_compiler_collective def all_gather_base(self, output_tensor, input_tensor, group=None, async_op=False): if DS_COMM_ALL_GATHER_OFF: if int(os.getenv('RANK', '0')) == 0: @@ -233,7 +238,7 @@ def all_gather_base(self, output_tensor, input_tensor, group=None, async_op=Fals "please consider upgrading your pytorch installation.") pass - @compiler.disable + @disable_compiler_collective def all_gather_coalesced(self, output_tensors, input_tensors, group=None, async_op=False): """""" assert len(output_tensors) == len(input_tensors), "" @@ -257,7 +262,7 @@ def all_gather_coalesced(self, output_tensors, input_tensors, group=None, async_ else: reqs[-1].wait() - @compiler.disable + @disable_compiler_collective def reduce_scatter_tensor(self, output_tensor, input_tensor, op=ReduceOp.SUM, group=None, async_op=False): if self.has_reduce_scatter_tensor(): return self.reduce_scatter_function(output_tensor, @@ -271,7 +276,7 @@ def reduce_scatter_tensor(self, output_tensor, input_tensor, op=ReduceOp.SUM, gr "please consider upgrading your pytorch installation.") pass - @compiler.disable + @disable_compiler_collective def all_to_all_single(self, output, input, @@ -286,27 +291,27 @@ def all_to_all_single(self, group=group, async_op=async_op) - @compiler.disable + @disable_compiler_collective def all_to_all(self, output_tensor_list, input_tensor_list, group=None, async_op=False): return torch.distributed.all_to_all(output_tensor_list, input_tensor_list, group=group, async_op=async_op) - @compiler.disable + @disable_compiler_collective def send(self, 
tensor, dst, group=None, tag=0): return torch.distributed.send(tensor=tensor, dst=dst, group=group, tag=tag) - @compiler.disable + @disable_compiler_collective def recv(self, tensor, src=None, group=None, tag=0): return torch.distributed.recv(tensor=tensor, src=src, group=group, tag=tag) - @compiler.disable + @disable_compiler_collective def isend(self, tensor, dst, group=None, tag=0): return torch.distributed.isend(tensor=tensor, dst=dst, group=group, tag=tag) - @compiler.disable + @disable_compiler_collective def irecv(self, tensor, src=None, group=None, tag=0): return torch.distributed.irecv(tensor=tensor, src=src, group=group, tag=tag) - @compiler.disable + @disable_compiler_collective def gather(self, tensor, gather_list=None, dst=0, group=None, async_op=False): return torch.distributed.gather(tensor=tensor, gather_list=gather_list, @@ -314,7 +319,7 @@ def gather(self, tensor, gather_list=None, dst=0, group=None, async_op=False): group=group, async_op=async_op) - @compiler.disable + @disable_compiler_collective def scatter(self, tensor, scatter_list=None, src=0, group=None, async_op=False): return torch.distributed.scatter(tensor=tensor, scatter_list=scatter_list, @@ -322,13 +327,13 @@ def scatter(self, tensor, scatter_list=None, src=0, group=None, async_op=False): group=group, async_op=async_op) - @compiler.disable + @disable_compiler_collective def barrier(self, group=torch.distributed.GroupMember.WORLD, async_op=False, device_ids=None): if group is None: group = torch.distributed.GroupMember.WORLD return torch.distributed.barrier(group=group, async_op=async_op, device_ids=device_ids) - @compiler.disable + @disable_compiler_collective def monitored_barrier(self, group=torch.distributed.GroupMember.WORLD, timeout=None, wait_all_ranks=False): if group is None: group = torch.distributed.GroupMember.WORLD diff --git a/deepspeed/inference/config.py b/deepspeed/inference/config.py index 1d5018aaa75b..c6c69bd6dd77 100644 --- a/deepspeed/inference/config.py +++ b/deepspeed/inference/config.py @@ -184,6 +184,15 @@ class DeepSpeedInferenceConfig(DeepSpeedConfigModel): values for :any:`DeepSpeedMoEConfig`. """ + keep_module_on_host: bool = False + """ + When loading checkpoints to model parameters, they are moved to the device. In large very models + this might fill the device and cause OOM. Setting this flag to true, will keep checkpoints on + host and not move them directly to the device (giving an option to quantize checkpoint data before + moving it to the device for example). + Set only for models with injection policies and auto TP. + """ + quant: QuantizationConfig = {} """ NOTE: only works for int8 dtype. diff --git a/deepspeed/inference/engine.py b/deepspeed/inference/engine.py index 5cdd99ff0b90..5c71a574c3d8 100755 --- a/deepspeed/inference/engine.py +++ b/deepspeed/inference/engine.py @@ -13,6 +13,7 @@ from packaging import version as pkg_version from deepspeed.runtime.checkpoint_engine.torch_checkpoint_engine import TorchCheckpointEngine from deepspeed.utils.timer import SynchronizedWallClockTimer +from deepspeed.runtime.compiler import is_compile_supported from ..runtime.state_dict_factory import SDLoaderFactory from ..runtime.weight_quantizer import WeightQuantization @@ -51,12 +52,7 @@ def __init__(self, model, config): DS_INFERENCE_ENABLED = True super().__init__() - - # Have to import here because inference_module is a global, but python - # globals only work at the module level and will not be updated unless - # we import it each time we init a new inference engine. 
- from ..model_implementations.transformers.ds_transformer import inference_module - if inference_module is not None: + if DeepSpeedTransformerInference.workspace is not None: self.destroy() self.module = model @@ -172,7 +168,7 @@ def __init__(self, model, config): is_meta_device = hasattr(self.module, "device") and self.module.device.type == 'meta' if is_meta_device: self.module.to_empty(device=device) - else: + elif not config.keep_module_on_host: self.module.to(device) if config.tensor_parallel.tp_size > 1: @@ -180,22 +176,21 @@ def __init__(self, model, config): dist.broadcast(_rng_state, 0) get_accelerator().set_rng_state(_rng_state.cpu()) - if config.tensor_parallel.tp_size > 1: + if config.enable_cuda_graph and get_accelerator().device_name() == 'hpu': + self.module = get_accelerator().wrap_in_hpu_graph(self.module) + elif config.tensor_parallel.tp_size > 1: assert not config.enable_cuda_graph, "Cuda graph is not supported for model parallelism" # Check if local CUDA graphs can be created in replacement modules self.local_cuda_graph = self._local_cuda_graph_used(self.module) + self._is_compiled = False def destroy(self): - # Have to import here because inference_module is a global, but python - # globals only work at the module level and will not be updated unless - # we import it each time we init a new inference engine. - from ..model_implementations.transformers.ds_transformer import inference_module DeepSpeedTransformerInference.layer_id = 0 DeepSpeedSelfAttention.num_layers = 0 - if inference_module is not None: - inference_module.release_workspace() - inference_module = None + if DeepSpeedTransformerInference.workspace.is_allocated(): + DeepSpeedTransformerInference.workspace.release_workspace() + DeepSpeedTransformerInference.workspace = None def profile_model_time(self, use_cuda_events=True): if not self.model_profile_enabled and not self._config.enable_cuda_graph: @@ -321,7 +316,7 @@ def _validate_args(self, mpu, replace_with_kernel_inject): if self._config.checkpoint is not None and not isinstance(self._config.checkpoint, (str, dict)): raise ValueError(f"checkpoint must be None, str or dict, got {type(self._config.checkpoint)}") - supported_dtypes = [None, torch.half, torch.int8, torch.float] + supported_dtypes = [None, torch.half, torch.int8, torch.float, torch.bfloat16] if self._config.dtype not in supported_dtypes: raise ValueError(f"{self._config.dtype} not supported, valid dtype: {supported_dtypes}") @@ -589,7 +584,8 @@ def forward(self, *inputs, **kwargs): **kwargs: variable length keyword arguments """ start = None - if self.model_profile_enabled and get_accelerator().device_name() == 'cuda' and self._config.enable_cuda_graph: + if self.model_profile_enabled and (get_accelerator().device_name() == 'cuda' or get_accelerator().device_name() == 'hpu') and \ + self._config.enable_cuda_graph: get_accelerator().synchronize() start = time.time() @@ -634,3 +630,19 @@ def _generate(self, *inputs, **kwargs): ) return self.module.generate(*inputs, **kwargs) + + def compile(self, backend=get_accelerator().get_compile_backend(), compile_kwargs={}) -> None: + """ + Compile the module using the specified backend and kwargs. 
+ """ + if not is_compile_supported(): + raise RuntimeError("compile is not supported in your version of PyTorch.") + + if self._is_compiled: + return + self.module.compile(backend=backend, **compile_kwargs) + self._is_compiled = True + + @property + def is_compiled(self) -> bool: + return self._is_compiled diff --git a/deepspeed/launcher/runner.py b/deepspeed/launcher/runner.py index 0ca89dd0497a..821ddb42a28d 100755 --- a/deepspeed/launcher/runner.py +++ b/deepspeed/launcher/runner.py @@ -31,7 +31,7 @@ from deepspeed.accelerator import get_accelerator DLTS_HOSTFILE = "/job/hostfile" -EXPORT_ENVS = ['MLFLOW', 'PYTHON', 'MV2', 'UCX'] +EXPORT_ENVS = ['MLFLOW', 'NCCL', 'PYTHON', 'MV2', 'UCX'] EXPORT_ENVS += NEBULA_EXPORT_ENVS DEEPSPEED_ENVIRONMENT_NAME = os.getenv("DS_ENV_FILE", ".deepspeed_env") DEEPSPEED_ENVIRONMENT_PATHS = [os.path.expanduser("~"), '.'] diff --git a/deepspeed/linear/__init__.py b/deepspeed/linear/__init__.py index a27f1c3eaee7..9931a95a0a40 100644 --- a/deepspeed/linear/__init__.py +++ b/deepspeed/linear/__init__.py @@ -5,3 +5,4 @@ from .optimized_linear import OptimizedLinear from .config import LoRAConfig, QuantizationConfig +from .context_manager import Init, init_lora diff --git a/deepspeed/linear/config.py b/deepspeed/linear/config.py index ae9050a3c92b..7bbda92c75be 100644 --- a/deepspeed/linear/config.py +++ b/deepspeed/linear/config.py @@ -3,7 +3,10 @@ # DeepSpeed Team -from dataclasses import dataclass +from dataclasses import dataclass, field +from typing import List + +import torch @dataclass @@ -12,15 +15,24 @@ class LoRAConfig: Configuration settings for LoRAOptimizedLinear. Attributes: - lora_r (int): LoRA attention dimension, also know as the rank. Defaults is 64. + lora_r (int): LoRA attention dimension, also known as the rank. Defaults is 64. lora_alpha (float): LoRA scaling factor, default is 16. base_weight_sharding (int): The degree to which the base weights are sharded, should typically be set to the data-parallel world size to maximize the memory reduction benefits. Defaults to 1, which means this feature is disabled. + offload (bool): offload frozen parameters to cpu when not in use + offload_ratio (float): ratio of parameters to offload to cpu when not in use + delay_lora_init (bool): initialize lora parameters at time of model init or allow manual init later + target_mods (str): target module names to apply LoRA to, defaults to llama-3.1 arch """ lora_r: int = 64 lora_alpha: float = 16. base_weight_sharding: int = 1 + offload: bool = False + offload_ratio: float = 0.0 + delay_lora_init: bool = False + target_mods: List[str] = field( + default_factory=lambda: ['q_proj', 'k_proj', 'v_proj', 'o_proj', 'gate_proj', 'up_proj', 'down_proj']) @dataclass @@ -32,8 +44,15 @@ class QuantizationConfig: Attributes: q_bits (int): The number of bits used for quantization. Default is 8. mantissa_bits (int): The number of bits reserved for the mantissa in fixed-point quantization. Default is 3. - group_size (int): The size of the group used for quantization. Default is 512. + group_size (int): The number of elements used for quantization. Default is 512. + q_dtype (torch.dtype): The data type to quantize to. Default is uint8. (in CUDA, buffers are allocated as + uint8, but inside the kernels the quantization is done to fp8) + q_range_dtype (torch.dtype): The data type used to calculate the quantization range. In some cases/accelerators, + the range is different from the type we quantize to (like fp8 and fp8uz). + Default is uint8. 
""" q_bits: int = 8 mantissa_bits: int = 3 group_size: int = 512 + q_dtype: torch.dtype = torch.uint8 + q_range_dtype: torch.dtype = torch.uint8 diff --git a/deepspeed/linear/context_manager.py b/deepspeed/linear/context_manager.py new file mode 100644 index 000000000000..204fa0fe9c1d --- /dev/null +++ b/deepspeed/linear/context_manager.py @@ -0,0 +1,90 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .optimized_linear import LoRAOptimizedLinear, OptimizedLinear + +import torch + +try: + import transformers +except ImportError: + transformers = None + + +def init_lora(model): + model.requires_grad_(False) + for m in model.modules(): + if isinstance(m, LoRAOptimizedLinear): + m.init_lora() + + +class Init(object): + """ + Init context wrapper similar in style to zero.Init. Allows for injecting OptimizedLinear during model + construction which will shard base weights and reduce overall memory usage during model init. Primarily + useful when initializing a model via transformers.AutoModelForCausalLM. + + Example usage: + lora_config = deepspeed.linear.LoRAConfig(..) + quant_config = deepspeed.linear.QuantizationConfig(..) + with deepspeed.linear.Init(lora_config=lora_config, quant_config=quant_config): + model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3.1-405B") + + """ + + def __init__(self, lora_config=None, quant_config=None): + self._orig_nn_linear = torch.nn.Linear + self._orig_causallm_pretrained = None + if transformers != None: + self._orig_causallm_pretrained = transformers.AutoModelForCausalLM.from_pretrained + self._orig_causallm_config = transformers.AutoModelForCausalLM.from_config + self.lora_config = lora_config + self.quant_config = quant_config + self._post_init_complete = False + + def __enter__(self): + + class OptLinearWrapper: + _orig_nn_linear = self._orig_nn_linear + _lora_config = self.lora_config + _quant_config = self.quant_config + + def __new__(self, *args, **kwargs): + self._lora_config.delay_lora_init = True + kwargs['lora_config'] = self._lora_config + kwargs['quantization_config'] = self._quant_config + kwargs['linear_cls'] = self._orig_nn_linear + return OptimizedLinear(*args, **kwargs) + + def _model_init(model): + if self.lora_config != None: + init_lora(model) + self._post_init_complete = True + return model + + # ensures non-lora params are frozen and lora weights are initialized + def from_pretrained(*args, **kwargs): + model = self._orig_causallm_pretrained(*args, **kwargs) + return _model_init(model) + + def from_config(*args, **kwargs): + model = self._orig_causallm_config(*args, **kwargs) + return _model_init(model) + + torch.nn.Linear = OptLinearWrapper + if transformers != None: + transformers.AutoModelForCausalLM.from_pretrained = from_pretrained + transformers.AutoModelForCausalLM.from_config = from_config + + def __exit__(self, *args, **kwargs): + torch.nn.Linear = self._orig_nn_linear + if not self._post_init_complete: + print('WARNING: For some reason LoRA modules are not initialized, this is usually done automatically ' + 'if using transformers via (AutoModelForCausalLM from_pretrained/from_config). 
' + 'You must call `init_lora` on each module in order to use DeepSpeed LoRA, otherwise ' + 'you will error out during runtime.') + else: + transformers.AutoModelForCausalLM.from_pretrained = self._orig_causallm_pretrained + transformers.AutoModelForCausalLM.from_config = self._orig_causallm_config diff --git a/deepspeed/linear/optimized_linear.py b/deepspeed/linear/optimized_linear.py index e982785a8122..3720196aa255 100644 --- a/deepspeed/linear/optimized_linear.py +++ b/deepspeed/linear/optimized_linear.py @@ -40,7 +40,9 @@ def __new__(self, bias: bool = False, lora_config: LoRAConfig = None, quantization_config: QuantizationConfig = None, - dtype=torch.bfloat16): + device=None, + dtype=torch.bfloat16, + linear_cls=nn.Linear): if quantization_config is not None and not is_dataclass(quantization_config): raise ValueError(f"Expecting QuantizationConfig but received {type(quantization_config)}") @@ -48,7 +50,7 @@ def __new__(self, raise ValueError(f"Expecting LoRAConfig but received {type(lora_config)}") if lora_config is None and quantization_config is None: # Everything disabled, fall back to normal nn.Linear - self = nn.Linear(input_dim, output_dim, bias=bias, dtype=dtype) + self = linear_cls(input_dim, output_dim, bias=bias, dtype=dtype, device=device) elif lora_config: # lora enabled, quantization may or may not be @@ -57,7 +59,9 @@ def __new__(self, bias=bias, lora_config=lora_config, quantization_config=quantization_config, - dtype=dtype) + dtype=dtype, + device=device, + linear_cls=linear_cls) elif quantization_config: # only quantization enabled, no lora @@ -78,57 +82,121 @@ def __init__(self, lora_config: LoRAConfig = None, quantization_config: QuantizationConfig = None, device=None, - dtype=torch.bfloat16): + dtype=torch.bfloat16, + linear_cls=nn.Linear): super().__init__() self.input_dim = input_dim self.output_dim = output_dim self.bias = bias self.lora_config = lora_config self.quantization_config = quantization_config - device = get_accelerator().current_device_name() if device is None else device + self.device = get_accelerator().current_device_name() if device is None else device + self.linear_cls = linear_cls + self.dtype = dtype assert self.lora_config is not None, "DSOptimizedLinear requires a LoRA config" - + assert not self.bias, "bias=True is not supported by LoRAOptimizedLinear" self.zero_shards = self.lora_config.base_weight_sharding self.sharded_weight_size = int(float(self.input_dim) // self.zero_shards) - w = torch.nn.Parameter(torch.empty((self.output_dim, self.sharded_weight_size), dtype=dtype)) - torch.nn.init.xavier_uniform_(w) + if self.zero_shards > 1: + assert self.zero_shards == dist.get_world_size( + ), "base weight sharding is only supported across world size" + w = torch.nn.Parameter(torch.empty(self.output_dim * self.sharded_weight_size, dtype=dtype), + requires_grad=False) + else: + w = torch.nn.Parameter(torch.empty((self.output_dim, self.input_dim), dtype=dtype), requires_grad=False) + torch.nn.init.xavier_uniform_(w.reshape(self.sharded_weight_size, self.output_dim)) if self.quantization_config is not None: assert dtype == torch.bfloat16, "only bfloat16 is supported when using quantization" - self.base_weight = QuantizedParameter(w, quantization_config=quantization_config) + self.weight = QuantizedParameter(w, quantization_config=quantization_config) else: - self.base_weight = w + self.weight = w + + self.disabled = False + self._initialized = False + if not self.lora_config.delay_lora_init: + self.init_lora() + + def disable(self): + 
self.disabled = True + self.weight = torch.nn.Parameter(torch.empty((self.output_dim, self.input_dim), dtype=self.dtype), + requires_grad=False) + + def init_lora(self): + if self.disabled: + return + + if self.quantization_config is not None: + # ensure quant-param wasn't stripped, in some cases transformers will do this during model init + if not isinstance(self.weight, QuantizedParameter): + self.weight = QuantizedParameter(self.weight, quantization_config=self.quantization_config) + + self._initialized = True + self.weight.requires_grad = False - self.base_weight.requires_grad = False + # Mark base weight to prevent broadcast and ensure proper offload behavior + self.weight.ds_optim_param = True + + self.lora_scaling_factor = self.lora_config.lora_alpha / self.lora_config.lora_r - # Use RS lora for now. - self.lora_scaling_factor = self.lora_config.lora_alpha / math.sqrt(self.lora_config.lora_r) # Keeping lora weights in bf16 precision for ease of training. - self.lora_weight_1 = nn.Linear(self.input_dim, - self.lora_config.lora_r, - bias=self.bias, - device=device, - dtype=dtype) - self.lora_weight_2 = nn.Linear(self.lora_config.lora_r, - self.output_dim, - bias=self.bias, - device=device, - dtype=dtype) + self.lora_weight_1 = self.linear_cls(self.input_dim, + self.lora_config.lora_r, + bias=self.bias, + device=self.device, + dtype=self.dtype) + self.lora_weight_2 = self.linear_cls(self.lora_config.lora_r, + self.output_dim, + bias=self.bias, + device=self.device, + dtype=self.dtype) + + # initialize "A" with kaiming uniform and "B" with zeros following this + # https://github.com/huggingface/peft/blob/62122b5add8d6892f70c82eaef2147a6ba33b90b/src/peft/tuners/lora/layer.py#L155 + nn.init.kaiming_uniform_(self.lora_weight_1.weight, a=math.sqrt(5)) + nn.init.zeros_(self.lora_weight_2.weight) self.lora_weight_1.weight.requires_grad = True self.lora_weight_2.weight.requires_grad = True + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, + error_msgs): + if not any([target in prefix for target in self.lora_config.target_mods]): + # module does not match any target_mods, we must revert to normal nn.Linear via disable + self.disable() + return super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, + unexpected_keys, error_msgs) + + if self.zero_shards > 1: + if not dist.is_initialized(): + raise RuntimeError( + "attempting to use optimized linear base weight sharding but torch-distributed is not initialized, please init first." + ) + rank = dist.get_rank() + shape_local = self.output_dim * self.sharded_weight_size + base_weight_name = f"{prefix}weight" + incoming_param = state_dict[base_weight_name] + state_dict[base_weight_name] = incoming_param.flatten().narrow(0, rank * shape_local, shape_local) + + return super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, + error_msgs) + def full_weight(self): - # This assumes weights are evenly sharded across gpus. which might not be correct. - # in that case, we should flatten before all_gather. 
- local_weight = self.base_weight.dequantized() if isinstance(self.base_weight, - QuantizedParameter) else self.base_weight - tensor_list = [ - torch.zeros_like(local_weight, device=local_weight.device, dtype=local_weight.dtype) - for _ in range(self.zero_shards) - ] - dist.all_gather(tensor_list, local_weight) - weight = nn.Parameter(torch.cat([tensor for tensor in tensor_list], dim=1)) - return weight + base_weight = self.weight + if getattr(base_weight, 'ds_offload', False): + # move to gpu so we can dequant and all-gather + assert base_weight.device == torch.device('cpu'), \ + f"expected base weight on cpu but found {base_weight.device}" + base_weight.offload(revert=True) + local_weight = base_weight.dequantized() if isinstance(base_weight, QuantizedParameter) else base_weight + base_weight.offload() + else: + local_weight = base_weight.dequantized() if isinstance(base_weight, QuantizedParameter) else base_weight + + tensor_out = torch.empty(self.output_dim * self.input_dim, + dtype=local_weight.dtype, + device=local_weight.device) + dist.all_gather_into_tensor(tensor_out, local_weight) + return tensor_out.reshape(self.output_dim, self.input_dim) def linear_without_F_linear(self, input, weight): output = torch.mm(input.reshape(-1, input.shape[-1]), weight) @@ -136,14 +204,18 @@ def linear_without_F_linear(self, input, weight): return output def forward(self, input_tensor): + if self.disabled: + return F.linear(input_tensor, self.weight) + assert self._initialized, "init_lora was never called, please initialize before proceeding" + # Gather the sharded base weight if self.zero_shards > 1: with torch.no_grad(): base_weight = self.full_weight() elif self.quantization_config: - base_weight = self.base_weight.dequantized() + base_weight = self.weight.dequantized() else: - base_weight = self.base_weight + base_weight = self.weight base_weight_output = F.linear(input_tensor, base_weight) lora_output = self.lora_weight_2(self.lora_weight_1(input_tensor)) diff --git a/deepspeed/linear/quantization.py b/deepspeed/linear/quantization.py index f5343af45fb8..2023601be281 100644 --- a/deepspeed/linear/quantization.py +++ b/deepspeed/linear/quantization.py @@ -51,30 +51,37 @@ def __new__( self.quantizer = quantizer else: # if FPQuantizerBuilder is not compatible in this env this init will fail - self.quantizer = FP_Quantize(group_size=self.quantization_config.group_size) + self.quantizer = FP_Quantize(quantization_config=self.quantization_config) self._ensure_quantized(self) return self def _ensure_quantized(self, tensor: torch.Tensor): # If the tensor is on the accelerator and is not quantized, then quantize it in-place. - if get_accelerator().on_accelerator(tensor) and tensor.dtype != torch.int8: + if get_accelerator().on_accelerator(tensor) and tensor.dtype != self.quantization_config.q_dtype: with get_accelerator().stream(get_accelerator().current_stream(tensor.device)): tensor.data = self.quantizer.quantize(tensor.data, q_bits=self.quantization_config.q_bits, q_mantisa_bits=self.quantization_config.mantissa_bits) - assert tensor.dtype == torch.int8 + assert tensor.dtype == self.quantization_config.q_dtype def dequantized(self) -> torch.Tensor: """ Return a tensor containing the dequantized weights of this parameter. 
""" - if get_accelerator().on_accelerator(self.data) and self.data.dtype == torch.int8: + if get_accelerator().on_accelerator(self.data) and self.data.dtype == self.quantization_config.q_dtype: with get_accelerator().stream(get_accelerator().current_stream(self.data.device)): return self.quantizer.dequantize(self.data, q_bits=self.quantization_config.q_bits, q_mantisa_bits=self.quantization_config.mantissa_bits) return self.data + def offload(self, revert=False): + if getattr(self, 'ds_offload', False): + if revert: + self.data = self.to(get_accelerator().current_device_name()) + else: + self.data = self.to('cpu') + def __getstate__(self): state = self.__dict__ state["data"] = self.data @@ -104,7 +111,9 @@ def __copy__(self): return new_instance def cuda(self, device=None, non_blocking=False): - return self.to(device="cuda" if device is None else device, non_blocking=non_blocking) + device = "cuda" if device is None else device + self.quantizer.to(device, non_blocking=non_blocking) + return self.to(device, non_blocking=non_blocking) def to(self, *args, **kwargs): """ @@ -112,6 +121,7 @@ def to(self, *args, **kwargs): quantize it. """ tensor = super().to(*args, **kwargs) + self.quantizer.to(*args, **kwargs) self._ensure_quantized(tensor) return tensor diff --git a/deepspeed/model_implementations/transformers/ds_llama2.py b/deepspeed/model_implementations/transformers/ds_llama2.py index 7d9eb4113a8a..325bfb4f7e18 100644 --- a/deepspeed/model_implementations/transformers/ds_llama2.py +++ b/deepspeed/model_implementations/transformers/ds_llama2.py @@ -4,11 +4,8 @@ # DeepSpeed Team import torch -from deepspeed import comm as dist from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference -inference_module = None - class DeepSpeedLlama2Inference(DeepSpeedTransformerInference): """Initialize the DeepSpeed OPT Transformer Layer. 
@@ -27,18 +24,10 @@ def forward(self, *args, **kwargs): input = args[0] input_mask = None - # Allocate memory only on first layer forward - if self.config.layer_id == 0 and self._alloc_workspace: - self.allocate_workspace(self.config.hidden_size, self.config.heads, - input.size()[1], - input.size()[0], DeepSpeedTransformerInference.layer_id, self.config.mp_size, - self.config.bigscience_bloom, - dist.get_rank() if dist.is_initialized() else 0, self.config.max_out_tokens, - self.config.min_out_tokens) - self._alloc_workspace = False - get_present = True + self.allocate_workspace(input.size()) + # We set the prev key/value to None when there is a prompt if input.shape[1] > 1: self.layer_past = None diff --git a/deepspeed/model_implementations/transformers/ds_transformer.py b/deepspeed/model_implementations/transformers/ds_transformer.py index d87d0de997b5..070de81dad67 100644 --- a/deepspeed/model_implementations/transformers/ds_transformer.py +++ b/deepspeed/model_implementations/transformers/ds_transformer.py @@ -6,19 +6,18 @@ import torch import torch.nn as nn from deepspeed import comm as dist +from deepspeed.ops.transformer.inference.op_binding.layer_norm import LayerNormOp from deepspeed.utils.logging import log_dist from deepspeed.ops.transformer.inference.ds_mlp import DeepSpeedMLP from deepspeed.ops.transformer.inference.ds_attention import DeepSpeedSelfAttention, BloomSelfAttention +from deepspeed.ops.transformer.inference.op_binding.workspace import WorkspaceOp from deepspeed.accelerator import get_accelerator -from deepspeed.ops.op_builder import InferenceBuilder import deepspeed if deepspeed.HAS_TRITON: from deepspeed.ops.transformer.inference.triton.mlp import TritonMLP from deepspeed.ops.transformer.inference.triton.attention import TritonSelfAttention -inference_module = None - class DeepSpeedTransformerInference(nn.Module): """Initialize the DeepSpeed Transformer Layer. @@ -37,6 +36,7 @@ class DeepSpeedTransformerInference(nn.Module): for specific downstream tasks. 
""" layer_id = 0 + workspace = None def __init__(self, config, @@ -52,10 +52,6 @@ def __init__(self, DeepSpeedTransformerInference.layer_id += 1 data_type = torch.half if self.config.dtype == torch.int8 else self.config.dtype - global inference_module - if inference_module is None: - builder = InferenceBuilder() - inference_module = builder.load() if DeepSpeedTransformerInference.layer_id == 1: log_dist(f"DeepSpeed-Inference config: {self.config.__dict__}", [0]) @@ -87,23 +83,25 @@ def __init__(self, requires_grad=False) self.norm_b = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type, device=device), requires_grad=False) - self.layer_past = None - try: - if config.dtype == torch.float32: - self.allocate_workspace = inference_module.allocate_workspace_fp32 - elif config.dtype == torch.bfloat16: - self.allocate_workspace = inference_module.allocate_workspace_bf16 - else: - self.allocate_workspace = inference_module.allocate_workspace_fp32 - self._alloc_workspace = True - except AttributeError: - self.allocate_workspace = None - self._alloc_workspace = False + self.layer_norm = LayerNormOp() + if DeepSpeedTransformerInference.workspace is None: + DeepSpeedTransformerInference.workspace = WorkspaceOp(self.config) + self._should_allocate_workspace = True + + def allocate_workspace(self, size): + # Allocate memory only on first layer forward + if self.config.layer_id == 0 and self._should_allocate_workspace: + DeepSpeedTransformerInference.workspace.allocate_workspace( + self.config.hidden_size, self.config.heads, size[1], size[0], DeepSpeedTransformerInference.layer_id, + self.config.mp_size, self.config.bigscience_bloom, + dist.get_rank() if dist.is_initialized() else 0, self.config.max_out_tokens, + self.config.min_out_tokens) + self._should_allocate_workspace = False @classmethod def reset_cache(cls): - if inference_module is not None: - inference_module.reset_cache() + if cls.workspace is not None: + cls.workspace.reset_cache() def forward( self, @@ -136,23 +134,11 @@ def forward( input_mask = (input_mask if attn_mask is None else attn_mask) if attention_mask is None else attention_mask - # Allocate memory only on first layer forward - if self.config.layer_id == 0 and self._alloc_workspace: - self.allocate_workspace(self.config.hidden_size, self.config.heads, - input.size()[1], - input.size()[0], DeepSpeedTransformerInference.layer_id, self.config.mp_size, - self.config.bigscience_bloom, - dist.get_rank() if dist.is_initialized() else 0, self.config.max_out_tokens, - self.config.min_out_tokens) - self._alloc_workspace = False + self.allocate_workspace(input.size()) get_present = (get_present or get_key_value or use_cache) input_mask = input_mask if attention_mask is None else attention_mask - - # We set the prev key/value to None when there is a prompt - if input.shape[1] > 1: - self.layer_past = None - layer_past = layer_past if layer_past is not None else self.layer_past + layer_past = past_key_value if past_key_value is not None else layer_past head_mask = layer_head_mask if layer_head_mask is not None else head_mask attn_mask = None @@ -178,14 +164,14 @@ def forward( output_attentions, self.norm_w, self.norm_b, - alibi) + alibi, + **kwargs) presents = (key, value) - self.layer_past = presents if layer_past is None else None output = self.mlp(attention_output, input, inp_norm, self.attention.attn_ob) if not self.config.pre_layer_norm: - output = inference_module.layer_norm(output, self.norm_w, self.norm_b, self.config.epsilon) + output = self.layer_norm(output, 
self.norm_w, self.norm_b, self.config.epsilon) output = output.to(input_type) if get_present: diff --git a/deepspeed/module_inject/auto_tp.py b/deepspeed/module_inject/auto_tp.py index 1c76cbc0a6ef..04c8397c978e 100644 --- a/deepspeed/module_inject/auto_tp.py +++ b/deepspeed/module_inject/auto_tp.py @@ -17,9 +17,11 @@ from deepspeed.module_inject.tp_shard import get_shard_size, get_shard_size_list -def move(tensor, device): +def move(tensor, device, keep_module_on_host=False): if tensor.is_meta: - return torch.empty_like(tensor, device=device) + return torch.empty_like(tensor, device='cpu' if keep_module_on_host else device) + elif keep_module_on_host: + return tensor.to('cpu') if device != 'cpu' else tensor else: # Using new tensors help in freeing memory (after split for example) was done before by calling clone(). # Using copy=True instead of clone() will help in case of cpu --> cpu. @@ -134,7 +136,7 @@ def is_load_module(module): load_layer_names = [ "LPLayerNorm", "SharedEmbedding", "OPTLearnedPositionalEmbedding", "LlamaRMSNorm", "FalconLinear", "MistralRMSNorm", "T5LayerNorm", "MixtralRMSNorm", "Phi3RotaryEmbedding", "Phi3SuScaledRotaryEmbedding", - "Phi3RMSNorm", "YuanRMSNorm", "YuanRotaryEmbedding" + "Phi3RMSNorm", "YuanRMSNorm", "YuanRotaryEmbedding", "Qwen2RMSNorm" ] return module.__class__ in load_layers or module._get_name() in load_layer_names @@ -188,7 +190,14 @@ def load(module, state_dict, prefix, mp_group=None): class AutoTP(): - def __init__(self, module, all_reduce_linears, prefix, state_dict, linear_layer_setting, orig_layer_impl): + def __init__(self, + module, + all_reduce_linears, + prefix, + state_dict, + linear_layer_setting, + orig_layer_impl, + keep_module_on_host=False): self.module = module self.all_reduce_linears = all_reduce_linears self.prefix = prefix @@ -200,6 +209,7 @@ def __init__(self, module, all_reduce_linears, prefix, state_dict, linear_layer_ self.orig_layer_impl = orig_layer_impl self.linear_policies = None self.conv_linear_layer = False + self.keep_module_on_host = keep_module_on_host def in_module_list(module, module_list): for item in module_list: @@ -354,7 +364,8 @@ def _replace(self, child, name, conv_linear_layer): data = child.weight.data.split(get_shard_size_list( weight_shape[0] if self.conv_linear_layer else weight_shape[1], self.mp_size, name), dim=1) - data_dc = move(data[mp_replace.gpu_index], get_accelerator().current_device_name()).detach() + data_dc = move(data[mp_replace.gpu_index], + get_accelerator().current_device_name(), self.keep_module_on_host).detach() del data setattr(child, "replaced", True) @@ -363,9 +374,9 @@ def _replace(self, child, name, conv_linear_layer): torch.nn.parameter.Parameter(data_dc, requires_grad=False), dist.get_rank(), dist.get_world_size(), child.bias if child.bias is None else torch.nn.parameter.Parameter( move(child.bias, - get_accelerator().current_device_name())), self.mp_group) + get_accelerator().current_device_name(), self.keep_module_on_host)), self.mp_group) return LinearAllreduce(torch.nn.parameter.Parameter(data_dc, requires_grad=False), child.bias if child.bias is None else \ - torch.nn.parameter.Parameter(move(child.bias, get_accelerator().current_device_name())), self.mp_group) + torch.nn.parameter.Parameter(move(child.bias, get_accelerator().current_device_name(), self.keep_module_on_host)), self.mp_group) else: # if conv_linear_layer [weight_shape[1], weight_shape[0] // mp_size] @@ -378,22 +389,24 @@ def _replace(self, child, name, conv_linear_layer): #The copy is a regular copy, The 
shape of dst and src is the same data_dc = move( prepare_tp_fused_qkvw(self.module, child.weight.data, self.mp_size, mp_replace.gpu_index), - get_accelerator().current_device_name()) + get_accelerator().current_device_name(), self.keep_module_on_host) bias_data_dc = None if child.bias is None else move( prepare_tp_fused_qkvw(self.module, child.bias.data, self.mp_size, mp_replace.gpu_index), - get_accelerator().current_device_name()) + get_accelerator().current_device_name(), self.keep_module_on_host) else: data = child.weight.data.split(get_shard_size_list(weight_shape[0], self.mp_size, name), dim=1 if self.conv_linear_layer else 0) - data_dc = move(data[mp_replace.gpu_index], get_accelerator().current_device_name()).detach() + data_dc = move(data[mp_replace.gpu_index], + get_accelerator().current_device_name(), self.keep_module_on_host).detach() del data if child.bias is not None: bias_data = child.bias.data.split(get_shard_size_list( weight_shape[1] if self.conv_linear_layer else weight_shape[0], self.mp_size, name), dim=0) - bias_data = move(bias_data[mp_replace.gpu_index], get_accelerator().current_device_name()) + bias_data = move(bias_data[mp_replace.gpu_index], + get_accelerator().current_device_name(), self.keep_module_on_host) bias_data_dc = torch.nn.parameter.Parameter(bias_data, requires_grad=False) del bias_data else: diff --git a/deepspeed/module_inject/containers/base.py b/deepspeed/module_inject/containers/base.py index 83e109167ffe..ab6286325eed 100644 --- a/deepspeed/module_inject/containers/base.py +++ b/deepspeed/module_inject/containers/base.py @@ -203,6 +203,12 @@ def set_mlp(self, _h4h_w, _h4h_b, _4hh_w, _4hh_b): self._4hh_b = _4hh_b def set_layernorm(self, attn_nw, attn_nb, input_nw, input_nb): + #TODO SW-164572: remove below mark_step WA once SW-164573 is resolved. + if get_accelerator().device_name() == 'hpu': + import habana_frameworks.torch.hpu as thpu + if thpu.is_initialized(): + import habana_frameworks.torch.core as htcore + htcore.mark_step() self.attn_nw = attn_nw self.attn_nb = attn_nb self.input_nw = input_nw diff --git a/deepspeed/module_inject/policy.py b/deepspeed/module_inject/policy.py index 41df2b85dc0c..d9cb8c2d9eb5 100644 --- a/deepspeed/module_inject/policy.py +++ b/deepspeed/module_inject/policy.py @@ -27,7 +27,7 @@ class DSPolicy(ABC): _orig_layer_class = None def __init__(self): - self.cuda_graph_supported = False + self.cuda_graph_supported = False if get_accelerator().device_name() != 'hpu' else True @abstractmethod def attention(self): @@ -62,7 +62,7 @@ def __init__( # Type of normalization to perform norm_type=NormType.LayerNorm): super().__init__() - self.cuda_graph_supported = False + self.cuda_graph_supported = False if get_accelerator().device_name() != 'hpu' else True self.inference = inference self.linear_layer = linear_layer self.scale_attention = scale_attention diff --git a/deepspeed/module_inject/replace_module.py b/deepspeed/module_inject/replace_module.py index 85abd0217039..d93780a106ec 100644 --- a/deepspeed/module_inject/replace_module.py +++ b/deepspeed/module_inject/replace_module.py @@ -268,7 +268,8 @@ def replace_wo_policy(module, all_reduce_linears, prefix="", state_dict=None): #mp_replace = ReplaceWithTensorSlicing(mp_group=config.tensor_parallel.tp_group) # 1. 
Create AutoTP object - _autotp = AutoTP(module, all_reduce_linears, prefix, state_dict, linear_layer_setting, orig_layer_impl) + _autotp = AutoTP(module, all_reduce_linears, prefix, state_dict, linear_layer_setting, orig_layer_impl, + config.keep_module_on_host) # 2. Set the tensor parallelism config _autotp.set_tensor_parallel_config(config.tensor_parallel.tp_size, config.tensor_parallel.tp_group) diff --git a/deepspeed/module_inject/tp_shard.py b/deepspeed/module_inject/tp_shard.py index 6758c7a657f6..a2b0d5edb927 100644 --- a/deepspeed/module_inject/tp_shard.py +++ b/deepspeed/module_inject/tp_shard.py @@ -5,6 +5,8 @@ from deepspeed import comm as dist global num_kv_heads +# TODO: SW-184584 remove this WA. +is_old_shard_size = None def set_num_kv_heads(num): @@ -34,12 +36,17 @@ def get_num_attention_heads(): def get_shard_size(total_size, mp_size, name=None, rank=None): global num_kv_heads + # TODO: SW-184584 remove this WA. + global is_old_shard_size + if is_old_shard_size is None: + import os + is_old_shard_size = os.environ.get("HPU_DS_OLD_SHARD_SIZE", "1").lower() in ["true", "1"] last_linear = ["lm_head", "embed_out"] # When we have num_kv_heads defined, uneven division is possible, otherwise enforce near even division if rank == None: rank = dist.get_rank() - if num_kv_heads != None and total_size % num_kv_heads == 0 and "mlp" not in str(name) and str( - name) not in last_linear: + if num_kv_heads != None and (is_old_shard_size or (total_size % num_kv_heads == 0 and "mlp" not in str(name) + and str(name) not in last_linear)): my_slices = (num_kv_heads // mp_size) + (1 if rank < (num_kv_heads % mp_size) else 0) return total_size * my_slices // num_kv_heads else: diff --git a/deepspeed/moe/capacity_bins.py b/deepspeed/moe/capacity_bins.py new file mode 100644 index 000000000000..1e56f6b353af --- /dev/null +++ b/deepspeed/moe/capacity_bins.py @@ -0,0 +1,346 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import math +import torch +from typing import Union +from deepspeed import comm as dist +from deepspeed.utils import groups +from deepspeed.utils.bwc import bwc_tensor_model_parallel_world_size + + +class CapacityBins(torch.nn.Module): + """ CapacityBins - maps current capacity value into capacity bins. + + When using drop_tokens=false, the capacity at each iteration will differ since + we use a capacity to accommodate for the largest number of tokens sent to an expert. + This creates dynamic shapes tensors. + + The motivation for using bins is to reduce the dynamic shapes to a limited set, hence + being more friendly when running in non-eager mode (e.g., using compile). + + The minimum range of capacity is the optimal capacity where all tokens are evenly routed + among all experts. The maximum range of capacity is the worst-case capacity where all + tokens are routed to a single expert (unlikely, but a valid upper bound). + + This class maintains the current configured capacity bins. It also tracks bins usage info + which enables to dynamically update the capacity bins to optimize performance (i.e. to + minimize the number of dummy extra tokens that are routed). + + Upon initialization, if configured_bins provided, use configured_bins to initialize the bins. + Otherwise, the capacity bins are initialized to bins with exponentially growing width. + + Argument use_cpu forces capacity bins logic to be executed on the CPU (not on the accelerator). + When using torch.compile, this prevents potential graph breaks. 
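For illustration (values derived from the bin-generation logic below, not part of the patch): with k=1, num_experts=8 and 512 tokens per forward pass, the capacity range is [64, 512]. Using num_capacity_bins=4, capacity_bins_exp_base=2.0 and alignment=1, the generated bin edges are [94, 154, 274, 512]; a runtime capacity of 200 is then served with a binned capacity of 274, so the compiler only ever sees these few static shapes.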
+ """ + + def __init__(self, + k: int, + num_experts: int, + num_capacity_bins: int, + capacity_bins_exp_base: float, + capacity_bins_alignment: int, + min_bin_size: int = 1, + configured_bins: Union[list, None] = None, + use_cpu=True) -> None: + super().__init__() + self.k = k + self.num_experts = num_experts + self.num_capacity_bins = num_capacity_bins + self.capacity_bins_exp_base = capacity_bins_exp_base + self.configured_alignment = capacity_bins_alignment + assert min_bin_size > 0, f'CapacityBins min_bin_size must be > 0, got {min_bin_size}' + self.min_bin_size = min_bin_size + if configured_bins is not None: + assert len(configured_bins) == self.num_capacity_bins, \ + f'Configured bins ({configured_bins}) does not match num capacity bins ({self.num_capacity_bins})' + assert all(bin_edge > 0 for bin_edge in configured_bins), \ + 'Configured bin edges must be > 0' + assert all(configured_bins[i] < configured_bins[i+1] for i in range(len(configured_bins)-1)), \ + 'Configured bin edges must be a strictly increasing list' + self.use_cpu = use_cpu + + # initialize usage stats + zero_bins = torch.zeros(num_capacity_bins, dtype=torch.long, device='cpu', requires_grad=False) + self.register_buffer('bins_usage', zero_bins.clone().detach()) + self.register_buffer('bins_usage_last', zero_bins.clone().detach()) + + # initialize bin edges + if configured_bins is not None: + self.register_buffer('capacity_bins', + torch.tensor(configured_bins, dtype=torch.long, device='cpu', requires_grad=False)) + else: + # we don't know the range of the capacity bins, therefore we create a zeroed tensor + # when we load from checkpoint, or during the first forward, we update the bins + # note that if the first element =0, it marks that capacity_bins is not initialized + self.register_buffer('capacity_bins', zero_bins.clone().detach()) + + # attribute self.device is the device to use for capacity bins logic, where attribute self.model_device + # is the device used by the model. attributes can be different in case use_cpu is configured. 
+ self.device = None + self.model_device = None + + self.min_tokens_per_expert = None + self.max_tokens_per_expert = None + self.alignment = None + + def set_bins(self, bins: list): + with (torch.no_grad()): + # set the new capacity bins and clear the usage stats (not relevant for new bins) + self.capacity_bins.copy_(torch.tensor(bins, dtype=torch.long, device=self.device)) + self.bins_usage.zero_() + self.bins_usage_last.zero_() + + def get_stats(self, incremental=True): + + def is_usage_data_available(usage_tensor): + with torch.no_grad(): + return usage_tensor.sum().item() > 0 + + if not is_usage_data_available(self.bins_usage): + return None + + with torch.no_grad(): + # reduce stats across all workers; for that, we need to temporarily move stats to model device + bins_usage = self.bins_usage.clone().detach().to(self.model_device) + dist.all_reduce(bins_usage, op=dist.ReduceOp.SUM, group=groups._get_data_parallel_group()) + bins_usage = bins_usage.to(self.device) + + # incremental returns only the diff from last activation of get_stats() + if incremental: + delta_bins_usage = bins_usage + if is_usage_data_available(self.bins_usage_last): + delta_bins_usage -= self.bins_usage_last + self.bins_usage_last.copy_(bins_usage) + bins_usage = delta_bins_usage + + # stats are returned using cpu tensors + bins_usage = bins_usage.to('cpu') + bins_usage_list = bins_usage.tolist() + bins_edges = self.capacity_bins.clone().detach().to('cpu') + bins_edges_list = bins_edges.tolist() + stats = { + 'min_range': self.min_tokens_per_expert, + 'max_range': self.max_tokens_per_expert, + 'alignment': self.alignment, + 'min_bin_size': self.min_bin_size if self.min_bin_size is not None else 0, + 'edges': bins_edges, + 'usage': bins_usage, + 'summary': {f'bin{i}_{bins_edges_list[i]}': bins_usage_list[i] + for i in range(len(bins_usage))} + } + return stats + + def _save_device(self, device: str): + if self.device is None: + # set self.device to requested device for capacity bins logic. 
also keep device used by model + assert self.model_device is None, f'Expected model_device=None on 1st forward, but got {self.model_device}' + self.model_device = device + self.device = 'cpu' if self.use_cpu else self.model_device + + # move all model's buffers to device used for capacity bins logic + self.capacity_bins = self.capacity_bins.to(self.device) + self.bins_usage = self.bins_usage.to(self.device) + self.bins_usage_last = self.bins_usage_last.to(self.device) + + def get_binned_capacity(self, gate_output, capacity, update_stats=True): + with torch.no_grad(): + # on first forward, capture device used + # then, move inputs to requested capacity bins device + self._save_device(gate_output.device) + gate_output, capacity = gate_output.to(self.device), capacity.to(self.device) + + # get bins; if first call, calculate bins + bins = self._get_capacity_bins(gate_output.shape[0], gate_output.device) + + # find bin to use based on current capacity and update stats + index = torch.searchsorted(bins, capacity, right=False) + index = torch.min(index, torch.tensor(len(bins) - 1, dtype=capacity.dtype, device=self.device)) + if update_stats: + self._update_stats(index) + + return bins[index].to(self.model_device) + + def _update_stats(self, index): + # currently we maintain stats for training only + if self.training: + self.bins_usage[index] += 1 + + def _generate_bins(self, force_start_bin=False): + # create exponentially growing width bins, and normalize width sum to 1.0 + # when force_start_bin=True, we force the first bin value = start range (aka start). + # force_start_bin=True is handled by prepending width=0 + start = self.min_tokens_per_expert + stop = self.max_tokens_per_expert + exp_base = torch.tensor(self.capacity_bins_exp_base, dtype=torch.float).to(self.device) + if force_start_bin: + bin_widths = exp_base**torch.arange(0, self.num_capacity_bins - 1, device=self.device) + bin_widths = torch.cat([torch.tensor([0.], device=bin_widths.device), bin_widths]) + else: + bin_widths = exp_base**torch.arange(0, self.num_capacity_bins, device=self.device) + normalized_bin_widths = bin_widths / torch.sum(bin_widths) + + # calculate bin edges by accumulating the bins width and scaling to [start...stop] range + # finally, align bin edges + bin_edges = torch.cumsum(normalized_bin_widths, dim=0) + bin_edges = start + (stop - start) * bin_edges + bin_edges = torch.ceil(bin_edges / self.alignment).mul(self.alignment).to(torch.long) + + # verify that we got N distinct capacity bins + assert len(set(bin_edges.tolist())) == self.num_capacity_bins, \ + f'Resulting capacity bins size != {self.num_capacity_bins}, bins={bin_edges.tolist()}. ' \ + f'Please try to reduce expotent base value with HL_CAPACITY_BINS_EXP_BASE ' \ + f'(current value: {exp_base.item()}, minimal value: 1.0). ' \ + f'If this is insufficient, limit the number of capacity bins with ' \ + f'HL_MOE_NUM_CAPACITY_BINS (set to {self.num_capacity_bins}) or reduce alignment with ' \ + f'HL_MOE_CAPACITY_BINS_ALIGNMENT (set to {self.alignment}).' 
+ + return bin_edges + + def _verify_configured_bins(self): + """ This method runs once (at first forward) and verifies that configured bins are valid """ + # verify configured bins range + if (self.capacity_bins[0].item() < self.min_tokens_per_expert + or self.capacity_bins[-1].item() < self.max_tokens_per_expert): + print( + f'Invalid capacity_bins={self.capacity_bins.clone().detach().cpu().tolist()},tokens per expert (min,max)={(self.min_tokens_per_expert, self.max_tokens_per_expert)}' + ) + return False + # verify configured bins alignment + alignment = torch.tensor(self.alignment, dtype=torch.long, device=self.device) + if torch.remainder(self.capacity_bins, alignment).sum().item() != 0: + print( + f'Invalid capacity_bins={self.capacity_bins.clone().detach().cpu().tolist()}, alignment={self.alignment} ' + ) + return False + return True + + def _get_capacity_bins(self, size: int, device: str) -> Union[torch.Tensor, None]: + """ Generates capacity bins with exponential growing width. + + During training, we encourage tokens to be evenly routed (via aux loss). + Therefore, generate bins with exponential growing bins width, i.e., bins that are + closer to the start are smaller and thus have less extra non-required capacity. + + Alignment is required when the bins have to be aligned on a specific value. + For example: + 1. Configured alignment (capacity_bins_alignment) due to e.g. hardware specific considerations + 2. When the non-experts are using TP and the experts ate not using TP, we + need to align the bins on TP boundary. + + Args: + gate_output (torch.Tensor): router gating function output tensor + + Returns: + bins tensor (torch.Tensor dtype=torch.long) + """ + # in case of first forward, initialize information based on gate_output + if self.min_tokens_per_expert is None: + # calculate optimal and worst case (min and max) tokens per expert + n_tokens_in_micro_batch = torch.tensor(size, device=device).to(torch.long) + n_optimal_tokens_per_expert = torch.ceil(self.k * n_tokens_in_micro_batch / self.num_experts).to( + torch.long) + self.min_tokens_per_expert = n_optimal_tokens_per_expert.item() + self.max_tokens_per_expert = n_tokens_in_micro_batch.item() + # handle bin alignment - maximum between configured alignment and TP (if used) + tp_alignment = 1 + if groups._get_expert_model_parallel_world_size() == 1 and groups.mpu is not None: + tp_alignment = bwc_tensor_model_parallel_world_size(groups.mpu) + self.alignment = max(self.configured_alignment, tp_alignment) + + # if bins configured (either configured by user or loaded from checkpoint) - verify valid bins + # otherwise, initialize bins + if self.capacity_bins[0] > 0: + if self.training and not self._verify_configured_bins(): + # temporary WA for diff in parameters such as seql, bs (number of tokens per expert change) after load from checkpoint + self.capacity_bins = self._generate_bins() + else: + self.capacity_bins = self._generate_bins() + + return self.capacity_bins + + +def optimize_bins(min_range, bins: torch.Tensor, bins_usage: torch.Tensor, alignment, min_bin_size) -> list: + """ Optimize MOE capacity bins according to collected bins usage statistics + + The bins are optimized to minimize the cost of binning. + The cost of each bin is defined as the additional tokens processed in this bin. + Since we don't have the actual capacities that were mapped to each bin, we use the median of the bin. 
+ After we calculate the cost of all bins, we iteratively try to replace the lowest and highest cost bins + with 2 bins: the original highest cost bin and the median of the highest cost bin. + This way, we keep the number of bins constant while decreasing the overall cost of binning. + + For example: + Given bins [150, 200, 250, 300] with start of range=100 + And usage [100, 0, 50, 10 ] + + We first calculate the cost of each bin: + Cost: [25*100, 25*0, 25*50, 25*10] = [2500, 0, 1250, 250] + + Lowest cost bin is 200 (index=1) + Highest cost bin is 150 (index=0) + + First iteration of optimization: + Remove bin1 and split bin0 --> [125, 150, 250, 300] + """ + + def align_to(value): + return int(math.ceil(value / alignment) * alignment) + + # sort bins by their cost of usage (we want to split high cost bins) + # we assume that for each bin, the cost is 1/2 of its width * usage count + shifted_bins = torch.cat([torch.tensor([min_range], dtype=bins.dtype, device=bins.device), bins[:-1]]) + width = bins - shifted_bins + cost = bins_usage * width / 2.0 + sorted_cost = torch.argsort(cost, descending=False, stable=True).tolist() + + # sorted cost is in ascending order + # min_sort_idx is current index into sorted_cost for candidate bin to be removed + # max_sort_idx is current index into sorted_cost for candidate bin to be split + bins = bins.tolist() + n_bins = len(bins) + min_sort_idx = 0 + max_sort_idx = n_bins - 1 + new_bins = [] + while min_sort_idx <= max_sort_idx: + # if same cost, keep all remaining bins and exit + # this also handles the case of min_sort_idx == max_sort_idx + min_cost = cost[sorted_cost[min_sort_idx]] + max_cost = cost[sorted_cost[max_sort_idx]] + if min_cost == max_cost: + bin_indexes = sorted_cost[min_sort_idx:max_sort_idx + 1] + new_bins.extend([bins[idx] for idx in bin_indexes]) + break + + # last bin can't be removed + min_bin_idx = sorted_cost[min_sort_idx] + if min_bin_idx == (n_bins - 1): + new_bins.append(bins[min_bin_idx]) + min_sort_idx += 1 + continue + + # calculate the left & right bin's width of the candidate bin after we split it to 2 + # verify that both left & right will meet the min bin size requirement + max_bin_idx = sorted_cost[max_sort_idx] + max_bin_start = min_range if max_bin_idx == 0 else bins[max_bin_idx - 1] + max_bin_end = bins[max_bin_idx] + mid_point = (max_bin_start + max_bin_end) // 2 + mid_point = align_to(mid_point) + left_bin_width = mid_point - max_bin_start + right_bin_width = max_bin_end - mid_point + if left_bin_width < min_bin_size or right_bin_width < min_bin_size: + new_bins.append(bins[max_bin_idx]) + max_sort_idx -= 1 + continue + + # skip min cost bin and split max cost bin + new_bins.append(mid_point) + new_bins.append(max_bin_end) + min_sort_idx += 1 + max_sort_idx -= 1 + + # sort the bins in ascending order + bins = sorted(new_bins) + return bins diff --git a/deepspeed/moe/experts.py b/deepspeed/moe/experts.py index 0863221d7edf..8bb9c1bbc776 100644 --- a/deepspeed/moe/experts.py +++ b/deepspeed/moe/experts.py @@ -26,13 +26,17 @@ def __init__(self, expert: nn.Module, num_local_experts: int = 1, expert_group_n param.group_name = expert_group_name def forward(self, inputs: torch.Tensor) -> torch.Tensor: - chunks = inputs.chunk(self.num_local_experts, dim=1) - expert_outputs: List[torch.Tensor] = [] - - for chunk, expert in zip(chunks, self.deepspeed_experts): - out = expert(chunk) + if inputs.size(1) > 1: # if E>1 then process group chunks per group + chunks = inputs.chunk(self.num_local_experts, dim=1) + expert_outputs: 
List[torch.Tensor] = [] + for chunk, expert in zip(chunks, self.deepspeed_experts): + out = expert(chunk) + if isinstance(out, tuple): + out = out[0] # Ignore the bias term for now + expert_outputs += [out] + return torch.cat(expert_outputs, dim=1) + else: + out = self.deepspeed_experts[0](inputs) if isinstance(out, tuple): - out = out[0] # Ignore the bias term for now - expert_outputs += [out] - - return torch.cat(expert_outputs, dim=1) + out = out[0] + return out diff --git a/deepspeed/moe/layer.py b/deepspeed/moe/layer.py index 6777788ab885..c2459e2d6afb 100644 --- a/deepspeed/moe/layer.py +++ b/deepspeed/moe/layer.py @@ -10,8 +10,10 @@ from torch.nn import functional as F from deepspeed.utils import groups, log_dist +from deepspeed.utils.bwc import bwc_tensor_model_parallel_world_size from .experts import Experts from .sharded_moe import MOELayer, TopKGate +from deepspeed.accelerator import get_accelerator class MoE(nn.Module): @@ -33,6 +35,10 @@ class MoE(nn.Module): use_tutel (bool, optional): default=False, whether to use Tutel optimizations (if installed). enable_expert_tensor_parallelism (bool, optional): default=False, whether to use tensor parallelism for experts top2_2nd_expert_sampling (bool, optional): default=True, whether to perform sampling for 2nd expert + num_capacity_bins (int, optional): default=0, number of capacity bins to use in case of drop_tokens=False + capacity_bins_exp_base (float, optional): default=2.0, in case of capacity bins, exponential growing factor for bin width + capacity_bins_alignment (int, optional): default=1, in case of capacity bins, required bins alignment + configured_capacity_bins (list, optional): default=None, explicit configuration of capacity bin edges """ def __init__(self, @@ -50,7 +56,12 @@ def __init__(self, use_rts: bool = True, use_tutel: bool = False, enable_expert_tensor_parallelism: bool = False, - top2_2nd_expert_sampling: bool = True) -> None: + top2_2nd_expert_sampling: bool = True, + sequence_parallel: bool = False, + num_capacity_bins: int = 0, + capacity_bins_exp_base: float = 2.0, + capacity_bins_alignment: int = 1, + configured_capacity_bins: Optional[list] = None) -> None: super(MoE, self).__init__() @@ -61,7 +72,10 @@ def __init__(self, self.expert_group_name = f"ep_size_{self.ep_size}" self.num_experts = num_experts self.num_local_experts = num_experts // self.ep_size - + self.sequence_parallel = sequence_parallel + self.drop_tokens = drop_tokens + #TODO SW-179530: remove workaround when issue with lazy is resolved (see SW-179530). 
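A minimal usage sketch for the new capacity-bins arguments (illustrative only, not part of the patch; the expert module and sizes below are made up):

    import torch
    from deepspeed.moe.layer import MoE

    expert = torch.nn.Sequential(torch.nn.Linear(1024, 4096), torch.nn.ReLU(), torch.nn.Linear(4096, 1024))
    moe = MoE(hidden_size=1024,
              expert=expert,
              num_experts=8,
              k=1,
              drop_tokens=False,           # capacity bins only take effect when tokens are not dropped
              num_capacity_bins=8,         # map dynamic capacities onto 8 static bins
              capacity_bins_exp_base=2.0,  # exponentially growing bin widths
              capacity_bins_alignment=64)  # align bin edges, e.g. to a hardware-friendly multiple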
+ expert.to(get_accelerator().device_name()) log_dist( f'Creating MoE layer with num_experts: {num_experts} | num_local_experts: {self.num_local_experts} | expert_parallel_size: {self.ep_size}', [0]) @@ -70,14 +84,28 @@ def __init__(self, 'Unsupported noisy_gate_policy: ' + noisy_gate_policy experts = Experts(expert, self.num_local_experts, self.expert_group_name) - self.deepspeed_moe = MOELayer(TopKGate(hidden_size, num_experts, k, capacity_factor, eval_capacity_factor, - min_capacity, noisy_gate_policy, drop_tokens, use_rts, None, - top2_2nd_expert_sampling), + self.deepspeed_moe = MOELayer(TopKGate(hidden_size, + num_experts, + k, + capacity_factor, + eval_capacity_factor, + min_capacity, + noisy_gate_policy, + drop_tokens, + use_rts, + None, + top2_2nd_expert_sampling, + self.sequence_parallel, + num_capacity_bins, + capacity_bins_exp_base, + capacity_bins_alignment, + configured_bins=configured_capacity_bins), experts, self.expert_group_name, self.ep_size, self.num_local_experts, - use_tutel=use_tutel) + use_tutel=use_tutel, + sequence_parallel=self.sequence_parallel) if self.use_residual: self.mlp = expert # coefficient is used for weighted sum of the output of expert and mlp @@ -87,20 +115,31 @@ def set_deepspeed_parallelism(self, use_data_before_expert_parallel_: bool = Fal self._create_process_groups(use_data_before_expert_parallel_=use_data_before_expert_parallel_) def _create_process_groups(self, use_data_before_expert_parallel_: bool = False) -> None: + # For sequence-parallel + expert-tp + no token-dropping, create a process group with ranks of EP + TP. + # This group is required to reduce_max the local token capacity across EP + TP ranks. + tp_enabled = bwc_tensor_model_parallel_world_size(groups.mpu) > 1 + expert_tp_enabled = self.enable_expert_tensor_parallelism and tp_enabled + use_ep_tp_group = self.sequence_parallel and not self.drop_tokens and expert_tp_enabled + # Create process group for a layer if needed if self.expert_group_name not in groups._get_expert_parallel_group_dict(): print(f"No existing process group found, creating a new group named: {self.expert_group_name}") - if (groups.mpu is None) or (not self.enable_expert_tensor_parallelism): - # Condition 1 - no groups.mpu means no tensor parallelism - # Condition 2 - disabling expert tensor parallelism on purpose + if not expert_tp_enabled: + # expert tensor parallelism is disabled, use only expert parallelism and data parallelism groups._create_expert_and_data_parallel( self.ep_size, use_data_before_expert_parallel_=use_data_before_expert_parallel_) else: - # expert tensor parallelism is enabled + # expert tensor parallelism is enabled, use expert, data and tensor parallelism groups._create_expert_data_and_model_parallel( - self.ep_size, mpu=groups.mpu, use_data_before_expert_parallel_=use_data_before_expert_parallel_) + self.ep_size, + mpu=groups.mpu, + use_data_before_expert_parallel_=use_data_before_expert_parallel_, + create_expert_tensor_parallel_group=use_ep_tp_group) + # Set the group handle for the MOELayer (deepspeed_moe) object self.deepspeed_moe._set_ep_group(groups._get_expert_parallel_group(self.expert_group_name)) + if use_ep_tp_group: + self.deepspeed_moe._set_ep_tp_group(groups._get_expert_tensor_parallel_group(self.expert_group_name)) def forward(self, hidden_states: torch.Tensor, diff --git a/deepspeed/moe/sharded_moe.py b/deepspeed/moe/sharded_moe.py index 96eab5e2ab17..43e685778022 100644 --- a/deepspeed/moe/sharded_moe.py +++ b/deepspeed/moe/sharded_moe.py @@ -26,6 +26,7 @@ import 
torch.nn.functional as F from deepspeed.utils import groups from .mappings import drop_tokens, gather_tokens +from .capacity_bins import CapacityBins if TYPE_CHECKING: Base = Module[Tensor] @@ -178,6 +179,12 @@ def _one_hot_to_float(x, num_classes): return F.one_hot(x, num_classes=num_classes).float() +def _calculate_expert_weight(gates: Tensor, mask: Tensor, locations: Tensor, capacity: Tensor) -> Tensor: + gates = einsum("s,se->se", gates, mask) + locations = _one_hot_to_float(locations, capacity) + return einsum("se,sc->sec", gates, locations) + + def top1gating(logits: Tensor, capacity_factor: float, min_capacity: int, @@ -186,7 +193,9 @@ def top1gating(logits: Tensor, drop_tokens: bool = True, use_rts: bool = True, ep_group: Union[torch.distributed.ProcessGroup, None] = None, - use_tutel: bool = False) -> Tuple[Tensor, Tensor, Tensor, Tensor]: + use_tutel: bool = False, + ep_tp_group: Union[torch.distributed.ProcessGroup, None] = None, + capacity_bins: Union[CapacityBins, None] = None) -> Tuple[Tensor, Tensor, Tensor, Tensor]: """Implements Top1Gating on logits.""" if noisy_gate_policy == 'RSample': logits_w_noise = logits + gumbel_rsample(logits.shape, device=logits.device) @@ -206,13 +215,14 @@ def top1gating(logits: Tensor, mask1 = einsum("s,se->se", used_token, mask1) # gating decisions - exp_counts = torch.sum(mask1, dim=0).detach().to('cpu') + exp_counts = torch.sum(mask1, dim=0) # if we don't want to drop any tokens if not drop_tokens: - new_capacity = torch.max(exp_counts).to(logits.device) + new_capacity = torch.max(exp_counts) # Communicate across expert processes to pick the maximum capacity. - if ep_group is not None: + group = ep_tp_group if ep_tp_group is not None else ep_group + if group is not None: dist.all_reduce(new_capacity, op=dist.ReduceOp.MAX, group=ep_group) if groups._get_expert_model_parallel_world_size() == 1: # If the non-expert is tensor-parallel, we need to pad the capacity to 'tp'. @@ -221,6 +231,9 @@ def top1gating(logits: Tensor, new_capacity = torch.ceil(new_capacity / tp).mul(tp).to(new_capacity.dtype) # Make sure the capacity value does not exceed the number of tokens. 
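# (mask1.size(0) below is the total number of tokens S, which bounds any single expert's load.)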
capacity = min(new_capacity, torch.tensor(mask1.size(0)).to(new_capacity.device)) + if capacity_bins is not None: + capacity = capacity_bins.get_binned_capacity(gate_output=logits, capacity=capacity) + exp_counts = exp_counts.detach().to('cpu') # Compute l_aux me = torch.mean(gates, dim=0) @@ -290,7 +303,9 @@ def top2gating(logits: Tensor, min_capacity: int, drop_tokens: bool = True, ep_group: Union[torch.distributed.ProcessGroup, None] = None, - top2_2nd_expert_sampling: bool = True) -> Tuple[Tensor, Tensor, Tensor, Tensor]: + top2_2nd_expert_sampling: bool = True, + ep_tp_group: Union[torch.distributed.ProcessGroup, None] = None, + capacity_bins: Union[CapacityBins, None] = None) -> Tuple[Tensor, Tensor, Tensor, Tensor]: """Implements Top2Gating on logits.""" # everything is in fp32 in this function gates = F.softmax(logits, dim=1) @@ -303,7 +318,7 @@ def top2gating(logits: Tensor, if top2_2nd_expert_sampling: # Create a mask for 2nd's expert per token using Gumbel-max trick # https://timvieira.github.io/blog/post/2014/07/31/gumbel-max-trick/ - logits += gumbel_rsample(logits.shape, device=logits.device) + logits = logits + gumbel_rsample(logits.shape, device=logits.device) # Replace top-expert with min value logits_except1 = logits.masked_fill(mask1.bool(), float("-inf")) @@ -332,14 +347,17 @@ def top2gating(logits: Tensor, else: # Do not drop tokens - set capacity according to current expert assignments new_capacity = torch.max(exp_counts) + group = ep_tp_group if ep_tp_group is not None else ep_group if ep_group is not None: - dist.all_reduce(new_capacity, op=dist.ReduceOp.MAX, group=ep_group) + dist.all_reduce(new_capacity, op=dist.ReduceOp.MAX, group=group) if groups._get_expert_model_parallel_world_size() == 1: # If the non-expert is tensor-parallel, we need to pad the capacity to 'tp'. # This is since we are going to activate drop_tokens() to drop duplicate tokens. 
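# For example, with tp=4 a raw capacity of 13 is padded to ceil(13/4)*4 = 16, a multiple of the tensor-parallel size.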
tp = 1 if groups.mpu is None else bwc_tensor_model_parallel_world_size(mpu=groups.mpu) new_capacity = torch.ceil(new_capacity / tp).mul(tp).to(new_capacity.dtype) capacity = new_capacity + if capacity_bins is not None: + capacity = capacity_bins.get_binned_capacity(gate_output=logits, capacity=capacity) # Store the capacity location for each token locations1_s = torch.sum(locations1 * mask1, dim=1) @@ -357,13 +375,8 @@ def top2gating(logits: Tensor, gates2_s /= denom_s # Calculate combine_weights and dispatch_mask - gates1 = einsum("s,se->se", gates1_s, mask1_float) - gates2 = einsum("s,se->se", gates2_s, mask2_float) - locations1_sc = _one_hot_to_float(locations1_s, capacity) - locations2_sc = _one_hot_to_float(locations2_s, capacity) - combine1_sec = einsum("se,sc->sec", gates1, locations1_sc) - combine2_sec = einsum("se,sc->sec", gates2, locations2_sc) - combine_weights = combine1_sec + combine2_sec + combine_weights = _calculate_expert_weight(gates1_s, mask1_float, locations1_s, capacity) + combine_weights += _calculate_expert_weight(gates2_s, mask2_float, locations2_s, capacity) dispatch_mask = combine_weights.bool() return l_aux, combine_weights, dispatch_mask, exp_counts.detach().to('cpu') @@ -398,7 +411,12 @@ def __init__(self, drop_tokens: bool = True, use_rts: bool = True, ep_group: Union[torch.distributed.ProcessGroup, None] = None, - top2_2nd_expert_sampling: bool = True) -> None: + top2_2nd_expert_sampling: bool = True, + sequence_parallel: bool = False, + num_capacity_bins: int = 0, + capacity_bins_exp_base: float = 2.0, + capacity_bins_alignment: int = 1, + configured_bins: Union[list, None] = None) -> None: super().__init__() # Only top-1 and top-2 are supported at the moment. @@ -406,6 +424,8 @@ def __init__(self, raise ValueError('Only top-1 and top-2 gatings are supported.') self.wg = torch.nn.Linear(model_dim, num_experts, bias=False) self.ep_group = ep_group + self.ep_tp_group = None + self.num_experts = num_experts self.k = k self.capacity_factor = capacity_factor self.eval_capacity_factor = eval_capacity_factor @@ -417,6 +437,23 @@ def __init__(self, self.drop_tokens = drop_tokens self.use_rts = use_rts self.top2_2nd_expert_sampling = top2_2nd_expert_sampling + self.sequence_parallel = sequence_parallel + if self.sequence_parallel: + setattr(self.wg.weight, 'sequence_parallel', True) + self.capacity_bins = None + if not self.drop_tokens and num_capacity_bins > 0: + assert capacity_bins_exp_base >= 1.0, \ + f'capacity_bins_exp_base must be >= 1.0, but got {capacity_bins_exp_base}' + self.capacity_bins = CapacityBins(k, + num_experts, + num_capacity_bins, + capacity_bins_exp_base, + capacity_bins_alignment, + configured_bins=configured_bins) + + def _set_ep_tp_group(self, ep_tp_group): + assert self.ep_tp_group is None, f'Attempting to override an existing ep_tp_group' + self.ep_tp_group = ep_tp_group def _set_ep_group(self, ep_group): assert self.ep_group is None, f'Attempting to override an existing ep_group' @@ -439,11 +476,13 @@ def forward(self, if self.k == 1: gate_output = top1gating(logits, self.capacity_factor if self.training else self.eval_capacity_factor, self.min_capacity, used_token, self.noisy_gate_policy if self.training else None, - self.drop_tokens, self.use_rts, self.ep_group, use_tutel) + self.drop_tokens, self.use_rts, self.ep_group, use_tutel, self.ep_tp_group, + self.capacity_bins) else: gate_output = top2gating(logits, self.capacity_factor if self.training else self.eval_capacity_factor, - self.min_capacity, self.drop_tokens, self.ep_group, 
self.top2_2nd_expert_sampling) + self.min_capacity, self.drop_tokens, self.ep_group, self.top2_2nd_expert_sampling, + self.ep_tp_group, self.capacity_bins) if self.wall_clock_breakdown: self.timers(TOPK_GATE_TIMER).stop() @@ -451,6 +490,33 @@ def forward(self, return gate_output + def get_stats(self, incremental=True): + if self.capacity_bins is not None: + capacity_stats = self.capacity_bins.get_stats(incremental) + if capacity_stats is not None: + return {'capacity_bins': capacity_stats} + return None + + def has_capacity_bins(self): + return self.capacity_bins is not None + + def get_capacity_bins(self): + return self.capacity_bins + + +class AuxLossForSequenceParallel(torch.autograd.Function): + """ For sequence parallel, we need to divide the auxiliary loss gradient by TP. """ + + @staticmethod + def forward(ctx, aux_loss): + return aux_loss.clone() + + @staticmethod + def backward(ctx, grad_output): + grad_input = grad_output.clone() + grad_input *= (1.0 / bwc_tensor_model_parallel_world_size(mpu=groups.mpu)) + return grad_input + class MOELayer(Base): """MOELayer module which implements MixtureOfExperts as described in Gshard_. @@ -476,7 +542,8 @@ def __init__(self, ep_group_name, ep_size, num_local_experts: int, - use_tutel: bool = False) -> None: + use_tutel: bool = False, + sequence_parallel: bool = False) -> None: super().__init__() self.gate = gate self.experts = experts @@ -489,6 +556,8 @@ def __init__(self, self.time_moe = 0.0 self.timers = SynchronizedWallClockTimer() self.wall_clock_breakdown = False + self.sequence_parallel = sequence_parallel + self.l_aux = None self.use_tutel = use_tutel and TUTEL_INSTALLED and gate.k == 1 @@ -505,35 +574,45 @@ def _set_ep_group(self, ep_group): self.ep_group = ep_group self.gate._set_ep_group(ep_group) + def _set_ep_tp_group(self, ep_tp_group): + self.ep_tp_group = ep_tp_group + self.gate._set_ep_tp_group(ep_tp_group) + def forward(self, *input: Tensor, **kwargs: Any) -> Tensor: if self.wall_clock_breakdown: self.timers(MOE_TIMER).start() # Implement Algorithm 2 from GShard paper. - d_model = input[0].shape[-1] + S, B, M = input[0].shape[:3] # [S, B, M] # Initial implementation -> Reshape into S tokens by dropping sequence dimension. 
# Reshape into G groups so that each group can distribute tokens equally # group_size = kwargs['group_size'] if 'group_size' in kwargs.keys() else 1 - reshaped_input = input[0].reshape(-1, d_model) + reshaped_input = input[0].reshape(-1, M) + + expert_model_parallel_world_size = groups._get_expert_model_parallel_world_size() if self.use_tutel: self.l_aux, C, E, indices_, locations_, gates_, self.exp_counts = self.gate(reshaped_input, input[1], True) - S, M = reshaped_input.size(0), reshaped_input.size(1) if not hasattr(self, '_tutel_dispatcher'): self._tutel_dispatcher = tutel_moe.fast_dispatcher(E, C, M, dispatch_dtype=reshaped_input.dtype) self._tutel_dispatcher.update(indices_, locations_, gates_, capacity=C) dispatched_input = self._tutel_dispatcher.encode(reshaped_input) else: - self.l_aux, combine_weights, dispatch_mask, self.exp_counts = self.gate(reshaped_input, input[1]) - dispatched_input = einsum("sec,sm->ecm", dispatch_mask.type_as(input[0]), reshaped_input) + l_aux, combine_weights, dispatch_mask, self.exp_counts = self.gate(reshaped_input, input[1]) + combine_weights = combine_weights.type_as(input[0]) + dispatch_mask = dispatch_mask.type_as(input[0]) + dispatched_input = einsum("sec,sm->ecm", dispatch_mask, reshaped_input) + if self.sequence_parallel and expert_model_parallel_world_size > 1: + l_aux = AuxLossForSequenceParallel.apply(l_aux) + self.l_aux = l_aux if self.wall_clock_breakdown: self.timers(FIRST_ALLTOALL_TIMER).start() - if groups._get_expert_model_parallel_world_size() == 1: + if expert_model_parallel_world_size == 1: # If the non-expert is tensor-parallel, it will create # duplicate tokens on the tensor-parallel ranks. # Since our experts are not tensor-parallel, these duplicates @@ -549,7 +628,7 @@ def forward(self, *input: Tensor, **kwargs: Any) -> Tensor: self.time_falltoall = self.timers(FIRST_ALLTOALL_TIMER).elapsed(reset=False) # Re-shape after all-to-all: ecm -> gecm - dispatched_input = dispatched_input.reshape(self.ep_size, self.num_local_experts, -1, d_model) + dispatched_input = dispatched_input.reshape(self.ep_size, self.num_local_experts, -1, M) expert_output = self.experts(dispatched_input) @@ -563,7 +642,7 @@ def forward(self, *input: Tensor, **kwargs: Any) -> Tensor: self.time_salltoall = self.timers(SECOND_ALLTOALL_TIMER).elapsed(reset=False) # Re-shape back: gecm -> ecm - expert_output = expert_output.reshape(self.ep_size * self.num_local_experts, -1, d_model) + expert_output = expert_output.reshape(self.ep_size * self.num_local_experts, -1, M) if groups._get_expert_model_parallel_world_size() == 1: # the dropped duplicate tokens need to be gathered on each @@ -572,14 +651,14 @@ def forward(self, *input: Tensor, **kwargs: Any) -> Tensor: expert_output = gather_tokens(expert_output, dim=1) if self.use_tutel: - combined_output = self._tutel_dispatcher.decode(expert_output.view(E * C, M)) + expert_output = self._tutel_dispatcher.decode(expert_output.view(E * C, M)) else: - combined_output = einsum("sec,ecm->sm", combine_weights.type_as(input[0]), expert_output) + expert_output = einsum("sec,ecm->sm", combine_weights, expert_output) - a = combined_output.reshape(input[0].shape) + expert_output = expert_output.reshape(S, B, M) if self.wall_clock_breakdown: self.timers(MOE_TIMER).stop() self.time_moe = self.timers(MOE_TIMER).elapsed(reset=False) - return a + return expert_output diff --git a/deepspeed/ops/__init__.py b/deepspeed/ops/__init__.py index 7ea5ce5af19e..15179984173c 100755 --- a/deepspeed/ops/__init__.py +++ b/deepspeed/ops/__init__.py 
@@ -9,7 +9,7 @@ from . import lion from . import sparse_attention from . import transformer - +from . import fp_quantizer from .transformer import DeepSpeedTransformerLayer, DeepSpeedTransformerConfig from ..git_version_info import compatible_ops as __compatible_ops__ diff --git a/deepspeed/ops/fp_quantizer/__init__.py b/deepspeed/ops/fp_quantizer/__init__.py index 995bbae4aeaf..51377bc6092c 100644 --- a/deepspeed/ops/fp_quantizer/__init__.py +++ b/deepspeed/ops/fp_quantizer/__init__.py @@ -4,3 +4,9 @@ # DeepSpeed Team from .quantize import FP_Quantize, Quantizer + +try: + import triton + from .fp8_gemm import matmul_fp8 +except ImportError: + pass diff --git a/deepspeed/ops/fp_quantizer/fp8_gemm.py b/deepspeed/ops/fp_quantizer/fp8_gemm.py new file mode 100644 index 000000000000..55504e3af8c9 --- /dev/null +++ b/deepspeed/ops/fp_quantizer/fp8_gemm.py @@ -0,0 +1,171 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +######## Fused MoE kernel ######### +# These kernels are implemented for +# fusing GeMM with dequantization of +# fp8 weight data when using bit-16 +# activation. +################################### + +import torch +import triton +import triton.language as tl + + +@triton.jit +def matmul_kernel_fp8_bf16(inp_ptr, weight_ptr, out_ptr, scale_ptr, M, N, K, stride_am, stride_ak, stride_bk, + stride_bn, stride_cm, stride_cn, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr, + quantization_group_size: tl.constexpr): + pid = tl.program_id(axis=0) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(N, BLOCK_SIZE_N) + num_pid_in_group = GROUP_SIZE_M * num_pid_n + group_id = pid // num_pid_in_group + first_pid_m = group_id * GROUP_SIZE_M + group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M) + pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m) + pid_n = (pid % num_pid_in_group) // group_size_m + + offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M + offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N + offs_k = tl.arange(0, BLOCK_SIZE_K) + + inp_data = inp_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak) + weight_data = weight_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn) + weight_ptrs_offset = offs_k[:, None] * (stride_bk // quantization_group_size) + ( + (pid_n * BLOCK_SIZE_N) // quantization_group_size) + + weight = tl.load(weight_data, mask=offs_k[:, None] < K, other=0.0) + scale = tl.load(scale_ptr + weight_ptrs_offset) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)): + inp = tl.load(inp_data, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + # Dequantize weight (fp8 -> bf16) + w = (((weight & 0x80) << 8) | ((weight & 0x7f) << 4)).to(tl.uint16) + w = (w + 0x3C00).to(tl.uint16) + w = (w.to(tl.bfloat16, bitcast=True) * scale).to(tl.bfloat16) + + inp_data += BLOCK_SIZE_K * stride_ak + weight_data += BLOCK_SIZE_K * stride_bk + weight_mask = offs_k[:, None] < K - (k + 1) * BLOCK_SIZE_K + weight = tl.load(weight_data, mask=weight_mask, other=0.0) + scale = tl.load(scale_ptr + (weight_ptrs_offset + + (((k + 1) * BLOCK_SIZE_K * stride_bk) // quantization_group_size)), + mask=weight_mask, + other=0.0) + + accumulator += tl.dot(inp, w) + + out = accumulator.to(tl.bfloat16) + + offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + 
out_data = out_ptr + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :] + tl.store(out_data, out, mask=(offs_cm[:, None] < M) & (offs_cn[None, :] < N)) + + +@triton.jit +def matmul_kernel_fp8_fp16(inp_ptr, weight_ptr, out_ptr, scale_ptr, M, N, K, stride_am, stride_ak, stride_bk, + stride_bn, stride_cm, stride_cn, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr, + quantization_group_size: tl.constexpr): + pid = tl.program_id(axis=0) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(N, BLOCK_SIZE_N) + num_pid_in_group = GROUP_SIZE_M * num_pid_n + group_id = pid // num_pid_in_group + first_pid_m = group_id * GROUP_SIZE_M + group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M) + pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m) + pid_n = (pid % num_pid_in_group) // group_size_m + + offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M + offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N + offs_k = tl.arange(0, BLOCK_SIZE_K) + + inp_data = inp_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak) + weight_data = weight_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn) + weight_ptrs_offset = offs_k[:, None] * (stride_bk // quantization_group_size) + ( + (pid_n * BLOCK_SIZE_N) // quantization_group_size) + + weight = tl.load(weight_data, mask=offs_k[:, None] < K, other=0.0) + scale = tl.load(scale_ptr + weight_ptrs_offset) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)): + inp = tl.load(inp_data, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + # Dequantize weight (fp8 -> fp16) + w = (((weight & 0x80) << 8) | ((weight & 0x7f) << 7)).to(tl.uint16) + w = (w + 0x2000).to(tl.uint16) + w = (w.to(tl.float16, bitcast=True) * scale).to(tl.float16) + + inp_data += BLOCK_SIZE_K * stride_ak + weight_data += BLOCK_SIZE_K * stride_bk + + weight = tl.load(weight_data, mask=offs_k[:, None] < K - (k + 1) * BLOCK_SIZE_K, other=0.0) + scale = tl.load(scale_ptr + (weight_ptrs_offset + + (((k + 1) * BLOCK_SIZE_K * stride_bk) // quantization_group_size))) + + accumulator += tl.dot(inp, w) + + out = accumulator.to(tl.float16) + + offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + out_data = out_ptr + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :] + tl.store(out_data, out, mask=(offs_cm[:, None] < M) & (offs_cn[None, :] < N)) + + +def matmul_fp8(inp, weight, scale, quantization_group_size): + + assert inp.shape[1] == weight.shape[0], \ + f"Incompatible dimensions (input: {inp.shape}, weight: {weight.shape})" + + M, K = inp.shape + K, N = weight.shape + + out = torch.empty((M, N), device=inp.device, dtype=inp.dtype) + + # GEMM tuning parameters! 
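A note on the dequantization bit twiddling in the two kernels above: the shift/add constants are consistent with E4M3-encoded weights (exponent bias 7); the sign is moved to bit 15, the exponent/mantissa payload is shifted into the target format's fields, and the exponent is re-biased by adding 120 (bf16) or 8 (fp16). A tiny CPU-side check of the bf16 path (illustrative sketch, not part of the patch):

    import torch

    def fp8_e4m3_bits_to_bf16(byte: int) -> torch.Tensor:
        # Mirror of the kernel's bit manipulation: sign to bit 15, payload to bits 10..4,
        # then add 0x3C00 (== 120 << 7) to re-bias the exponent from 7 (E4M3) to 127 (bf16).
        w = ((byte & 0x80) << 8) | ((byte & 0x7F) << 4)
        w = (w + 0x3C00) & 0xFFFF
        w = w - 0x10000 if w >= 0x8000 else w  # two's complement so the pattern fits an int16
        return torch.tensor([w], dtype=torch.int16).view(torch.bfloat16)

    print(fp8_e4m3_bits_to_bf16(0x38))  # 0x38 encodes 1.0 in E4M3 -> tensor([1.], dtype=torch.bfloat16)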
+ # TODO: Add a more configurable tuning for selecting the best GeMM + BLOCK_SIZE_M = 16 if M <= 16 else 32 if M <= 32 else 64 if M <= 64 else 128 + BLOCK_SIZE_N = 64 + BLOCK_SIZE_K = max(64, quantization_group_size) + GROUP_SIZE_M = 8 + num_stages = 4 + num_warps = 4 + if M >= 256: + BLOCK_SIZE_M = 256 + BLOCK_SIZE_N = 128 + BLOCK_SIZE_K = max(128, quantization_group_size) + num_stages = 3 + num_warps = 8 + + grid = lambda META: (triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']), ) + kernel = matmul_kernel_fp8_bf16 if inp.dtype == torch.bfloat16 else matmul_kernel_fp8_fp16 + kernel[grid](inp, + weight, + out, + scale, + M, + N, + K, + inp.stride(0), + inp.stride(1), + weight.stride(0), + weight.stride(1), + out.stride(0), + out.stride(1), + quantization_group_size=quantization_group_size, + BLOCK_SIZE_M=BLOCK_SIZE_M, + BLOCK_SIZE_N=BLOCK_SIZE_N, + BLOCK_SIZE_K=BLOCK_SIZE_K, + GROUP_SIZE_M=GROUP_SIZE_M, + num_stages=num_stages, + num_warps=num_warps) + return out diff --git a/deepspeed/ops/fp_quantizer/quantize.py b/deepspeed/ops/fp_quantizer/quantize.py index f8435bda16c1..8892c4619540 100644 --- a/deepspeed/ops/fp_quantizer/quantize.py +++ b/deepspeed/ops/fp_quantizer/quantize.py @@ -7,14 +7,16 @@ import abc from abc import ABC +import gc from deepspeed.ops.op_builder import FPQuantizerBuilder +from deepspeed.accelerator import get_accelerator fp_quant_module = None class Quantizer(ABC): """ - Abstract Quantizer class that implmenents quantize/dequantize methods. + Abstract Quantizer class that implements quantize/dequantize methods. Arguments: group_size (int, optional): number of values or elements that are grouped @@ -40,12 +42,22 @@ def dequantize(self, input_q, fp_out=None, q_bits=8, q_mantisa_bits=3, scale=Non class FP_Quantize(Quantizer): - def __init__(self, group_size=512) -> None: + def __init__(self, quantization_config) -> None: global fp_quant_module - super().__init__(group_size=group_size) + super().__init__(group_size=quantization_config.group_size) if fp_quant_module is None: fp_quant_module = FPQuantizerBuilder().load() + self.is_fallback = fp_quant_module is None + self.quantize_func = self.quantize_fallback if self.is_fallback else fp_quant_module.quantize + self.get_scales_func = self.get_scales_fallback if self.is_fallback else fp_quant_module.get_scales + self.dequantize_func = self.dequantize_fallback if self.is_fallback else fp_quant_module.dequantize + self.selective_dequantize_func = self.selective_dequantize_fallback if self.is_fallback else fp_quant_module.selective_dequantize + self.q_config = quantization_config + self.orig_dtype = None + self.num_groups = None + self.input_q = None + self.scale = None def quantize(self, input, @@ -72,14 +84,41 @@ def quantize(self, assert (0), \ f"Missing {q_bits}-quantization, please add the template arguments for the kernel to support this precision!" - out = fp_quant_module.quantize(input, self.group_size, stochastic_mode, q_bits, q_mantisa_bits) - + # Adding (group_size - 1) is for padding + self.num_groups = (input.numel() + self.q_config.group_size - 1) // self.q_config.group_size + # group_size should be the minimal number between the defined group size and number of elements in tensor. 
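# (Note: from here on, group_size is a byte count per quantized group - elements * q_bits / 8 - rather than an element count.)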
+ group_size = int(min(self.q_config.group_size, input.numel()) * q_bits) // 8 + # CUDA quantization kernel saves the scale as (fp32) inside the quantized tensor for each group + if not self.is_fallback: + group_size += 4 + # CUDA quantization kernel allocates tensors as uint8, but handles them as fp8 inside the kernel. + self.input_q = torch.ones(self.num_groups, group_size, dtype=self.q_config.q_dtype, device=input.device) + out = self.quantize_func(self.input_q, input, group_size, stochastic_mode, q_bits, q_mantisa_bits) if return_meta_tensor: - data, scale = out.split(self.group_size, dim=-1) - return data.contiguous().reshape(input.shape), scale.contiguous() + if not self.is_fallback: + data, self.scale = out.split(group_size, dim=-1) + data = data.contiguous().reshape(input.shape) + else: + data = out.contiguous().reshape(input.shape) + self.scale = self.scale.contiguous() + del self.input_q + del out + gc.collect() + get_accelerator().empty_cache() + return data, self.scale return out + def to(self, *args, **kwargs): + # Intermediate tensors may need to be moved to different devices + if hasattr(self, 'input_q') and self.input_q is not None: + self.input_q = self.input_q.to(*args, **kwargs) + if hasattr(self, 'scale') and self.scale is not None: + self.scale = self.scale.to(*args, **kwargs) + + def get_scales(self): + return self.get_scales_func(self.scale, self.num_groups) + def dequantize(self, input_q, fp_out=None, q_bits=8, q_mantisa_bits=3, scale=None) -> torch.Tensor: assert (self.orig_dtype is not None), \ "[De-quantization Error]: you need to call quantize before dequantizing!" @@ -97,12 +136,15 @@ def dequantize(self, input_q, fp_out=None, q_bits=8, q_mantisa_bits=3, scale=Non assert (0), \ f"Missing {q_bits}-dequantization, please add the template arguments for the kernel to support this precision!" - if scale is not None: + if scale is not None and not self.is_fallback: assert input_q.numel() == fp_out.numel(), \ f'[De-quantization Error]: quantized data should have the same size as original tensor when scale is not None!' - input_q = torch.cat([input_q.reshape(-1, self.group_size), scale], dim=-1).contiguous() + input_q = torch.cat([input_q.reshape(-1, self.q_config.group_size), scale], dim=-1).contiguous() + elif scale is not None and self.is_fallback: + group_size = int(min(self.q_config.group_size, input_q.numel()) * q_bits) // 8 + input_q = input_q.reshape(-1, group_size) - fp_quant_module.dequantize(fp_out, input_q, self.group_size, q_mantisa_bits, q_bits - q_mantisa_bits - 1) + self.dequantize_func(fp_out, input_q, self.q_config.group_size, q_mantisa_bits, q_bits - q_mantisa_bits - 1) return fp_out def selective_dequantize(self, @@ -131,11 +173,47 @@ def selective_dequantize(self, assert (0), \ f"Missing {q_bits}-dequantization, please add the template arguments for the kernel to support this precision!" - if scale is not None: + if scale is not None and not self.is_fallback: assert input_q.numel() == fp_out.numel(), \ f'[De-quantization Error]: quantized data should have the same size as original tensor when scale is not None!' 
- input_q = torch.cat([input_q.reshape(-1, self.group_size), scale], dim=-1).contiguous() + input_q = torch.cat([input_q.reshape(-1, self.q_config.group_size), scale], dim=-1).contiguous() + + self.selective_dequantize_func(fp_out, input_q, indexes, self.q_config.group_size, q_mantisa_bits, + q_bits - q_mantisa_bits - 1) + return fp_out + + def selective_dequantize_fallback(self, val_q, scales, indexes, group_size, q_mantisa_bits, q_exponent_bits): + assert False, "Selective dequantize fallback isn't implemented!" - fp_quant_module.selective_dequantize(fp_out, input_q, indexes, self.group_size, q_mantisa_bits, - q_bits - q_mantisa_bits - 1) + def dequantize_fallback(self, fp_out, input_q, group_size, q_mantisa_bits, q_exponent_bits): + # TODO: [SW-208941] clear gaudi specific code. + dequant_out = torch.ops.hpu.cast_from_fp8(input_q, (1.0 / self.scale), self.orig_dtype).view(self.orig_shape) + fp_out.copy_(dequant_out) return fp_out + + def quantize_fallback(self, out, val, group_size, stochastic_rounding, q_bits, q_mantisa_bits): + assert q_bits == 8, "Quantize fallback only supports quantization to FP8" + assert q_mantisa_bits == 3, "Quantize fallback only supports q_mantissa_bits = 3" + # TODO: [SW-208941] clear gaudi specific code. + import habana_frameworks.torch.utils.experimental as htexp + if htexp._get_device_type() == htexp.synDeviceType.synDeviceGaudi2: + assert self.q_config.q_range_dtype == torch.float8_e4m3fnuz, "Gaudi2 has a unique dtype: float8_e4m3fnuz" + + # Reshape the tensor + val_reshaped = val.view(self.num_groups, group_size).float() + # Calculate the scale + max_vals = val_reshaped.abs().max(dim=1, keepdim=True)[0] + q_range = torch.finfo(self.q_config.q_range_dtype).max + self.scale = q_range / max_vals + # TODO: [SW-208941] clear gaudi specific code. 
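# The per-group scale computed above (q_range / max|value|) stretches each group to the full fp8 range; dequantize_fallback undoes this by applying 1.0 / scale.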
+ # Copy quantized + quant, _ = torch.ops.hpu.cast_to_fp8_v2(val_reshaped, + self.scale, + stochastic_rounding, + dtype=self.q_config.q_dtype) + out.copy_(quant) + + return out + + def get_scales_fallback(self, out, num_groups): + return self.scale diff --git a/deepspeed/ops/transformer/inference/config.py b/deepspeed/ops/transformer/inference/config.py index 9709328cc133..c0dd29f4f962 100644 --- a/deepspeed/ops/transformer/inference/config.py +++ b/deepspeed/ops/transformer/inference/config.py @@ -103,7 +103,6 @@ def __init__(self, self.return_tuple = return_tuple self.mlp_after_attn = mlp_after_attn self.mlp_act_func_type = mlp_act_func_type - self.specialized_mode = False self.training_mp_size = training_mp_size self.bigscience_bloom = bigscience_bloom self.max_out_tokens = max_out_tokens diff --git a/deepspeed/ops/transformer/inference/diffusers_attention.py b/deepspeed/ops/transformer/inference/diffusers_attention.py index 5efc560db75e..3c2340ccfc6f 100644 --- a/deepspeed/ops/transformer/inference/diffusers_attention.py +++ b/deepspeed/ops/transformer/inference/diffusers_attention.py @@ -10,10 +10,11 @@ from packaging import version as pkg_version from deepspeed.utils.logging import log_dist from deepspeed.accelerator import get_accelerator -from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.ops.transformer.inference.op_binding.workspace import WorkspaceOp +from deepspeed.ops.transformer.inference.op_binding.softmax_context import SoftmaxContextOp +from deepspeed.ops.transformer.inference.op_binding import LinearOp +from deepspeed.ops.transformer.inference.op_binding.pad_transform import PadTransformOp -# Cuda modules will be imported if needed -inference_module = None minus_inf = -10000.0 triton_flash_attn = None @@ -36,7 +37,8 @@ class DeepSpeedDiffusersAttentionFunction(Function): @staticmethod def forward(ctx, input, context, input_mask, config, attn_qkvw, attn_qw, attn_kw, attn_vw, attn_qkvb, num_attention_heads_per_partition, norm_factor, hidden_size_per_partition, attn_ow, attn_ob, - do_out_bias, score_context_func, linear_func, triton_flash_attn_kernel, rope_theta): + do_out_bias, score_context_func, linear_func, pad_transform_func, triton_flash_attn_kernel, + rope_theta): def _transpose_for_context(x): x = x.permute(0, 2, 1, 3) @@ -77,7 +79,7 @@ def selfAttention_fp(input, context, input_mask): query = query.contiguous() key = key.contiguous() value = value.contiguous() - query, key, value = inference_module.pad_transform_fp16(query, key, value, config.heads, do_flash_attn) + query, key, value = pad_transform_func(query, key, value, config.heads, do_flash_attn) attention_scores = (torch.matmul(query, key.transpose(-1, -2)) * scale).softmax(dim=-1) context_layer = _transpose_for_context(torch.matmul(attention_scores, value)) @@ -117,10 +119,6 @@ def __init__( data_type = self.config.dtype data_type_fp = torch.half if self.config.dtype == torch.int8 else self.config.dtype - global inference_module - if inference_module is None: - builder = InferenceBuilder() - inference_module = builder.load() if DeepSpeedDiffusersAttention.layer_id == 1: log_dist(f"DeepSpeed-Attention config: {self.config.__dict__}", [0]) @@ -171,26 +169,24 @@ def __init__( self.norm_factor *= math.sqrt(self.config.layer_id + 1) # https://github.com/huggingface/transformers/blob/v4.24.0/src/transformers/models/gpt2/modeling_gpt2.py#L191 - if self.config.dtype in [torch.float16, torch.int8]: - self.score_context_func = inference_module.softmax_context_fp16 - self.linear_func = 
inference_module.linear_layer_fp16 - self.allocate_workspace = inference_module.allocate_workspace_fp16 - else: - self.score_context_func = inference_module.softmax_context_fp32 - self.linear_func = inference_module.linear_layer_fp32 - self.allocate_workspace = inference_module.allocate_workspace_fp32 + self.workspace = WorkspaceOp(self.config) + self.score_context_func = SoftmaxContextOp(self.config) + self.linear_func = LinearOp(self.config) + self.pad_transform_func = PadTransformOp(self.config) - def forward(self, input, context=None, input_mask=None): + def allocate_workspace(self, size): + # Allocate memory only on first layer forward if self.config.layer_id == 0: - self.allocate_workspace(self.config.hidden_size, self.config.heads, - input.size()[1], - input.size()[0], DeepSpeedDiffusersAttention.layer_id, self.config.mp_size, False, - 0, self.config.max_out_tokens, self.config.min_out_tokens) - output = DeepSpeedDiffusersAttentionFunction.apply(input, context, input_mask, self.config, self.attn_qkvw, - self.attn_qw, self.attn_kw, self.attn_vw, self.attn_qkvb, - self.num_attention_heads_per_partition, self.norm_factor, - self.hidden_size_per_partition, self.attn_ow, self.attn_ob, - self.do_out_bias, self.score_context_func, self.linear_func, - self.triton_flash_attn_kernel, self.config.rope_theta) + self.workspace.allocate_workspace(self.config.hidden_size, self.config.heads, size[1], size[0], + DeepSpeedDiffusersAttention.layer_id, self.config.mp_size, False, 0, + self.config.max_out_tokens, self.config.min_out_tokens) + + def forward(self, input, context=None, input_mask=None): + self.allocate_workspace(input.size()) + output = DeepSpeedDiffusersAttentionFunction.apply( + input, context, input_mask, self.config, self.attn_qkvw, self.attn_qw, self.attn_kw, self.attn_vw, + self.attn_qkvb, self.num_attention_heads_per_partition, self.norm_factor, self.hidden_size_per_partition, + self.attn_ow, self.attn_ob, self.do_out_bias, self.score_context_func, self.linear_func, + self.pad_transform_func, self.triton_flash_attn_kernel, self.config.rope_theta) return output diff --git a/deepspeed/ops/transformer/inference/diffusers_transformer_block.py b/deepspeed/ops/transformer/inference/diffusers_transformer_block.py index b0156f905a06..d01638f36e40 100644 --- a/deepspeed/ops/transformer/inference/diffusers_transformer_block.py +++ b/deepspeed/ops/transformer/inference/diffusers_transformer_block.py @@ -10,26 +10,9 @@ from .diffusers_attention import DeepSpeedDiffusersAttention from .bias_add import nhwc_bias_add from .diffusers_2d_transformer import Diffusers2DTransformerConfig -from deepspeed.ops.op_builder import InferenceBuilder, SpatialInferenceBuilder from deepspeed.utils.types import ActivationFuncType - -# Ops will be loaded on demand -transformer_cuda_module = None -spatial_cuda_module = None - - -def load_transformer_module(): - global transformer_cuda_module - if transformer_cuda_module is None: - transformer_cuda_module = InferenceBuilder().load() - return transformer_cuda_module - - -def load_spatial_module(): - global spatial_cuda_module - if spatial_cuda_module is None: - spatial_cuda_module = SpatialInferenceBuilder().load() - return spatial_cuda_module +from .op_binding.gated_activation import GatedActivationOp +from .op_binding.layer_norm import LayerNormOp class DeepSpeedDiffusersTransformerBlock(nn.Module): @@ -76,8 +59,8 @@ def __init__(self, equivalent_module: nn.Module, config: Diffusers2DTransformerC else: self.attn_2_bias = nn.Paramaeter(torch.zeros_like(self.norm3_g), 
requires_grad=False) - self.transformer_cuda_module = load_transformer_module() - load_spatial_module() + self.gated_activation = GatedActivationOp() + self.layer_norm = LayerNormOp() def forward(self, hidden_states, context=None, timestep=None, **kwargs): # In v0.12.0 of diffuser, several new kwargs were added. Capturing @@ -88,17 +71,17 @@ def forward(self, hidden_states, context=None, timestep=None, **kwargs): if "encoder_hidden_states" in kwargs and kwargs["encoder_hidden_states"] is not None: context = kwargs["encoder_hidden_states"] - out_norm_1 = self.transformer_cuda_module.layer_norm(hidden_states, self.norm1_g, self.norm1_b, self.norm1_eps) + out_norm_1 = self.layer_norm(hidden_states, self.norm1_g, self.norm1_b, self.norm1_eps) out_attn_1 = self.attn_1(out_norm_1) - out_norm_2, out_attn_1 = self.transformer_cuda_module.layer_norm_residual_store_pre_ln_res( + out_norm_2, out_attn_1 = self.layer_norm.layer_norm_residual_store_pre_ln_res( out_attn_1, self.attn_1_bias, hidden_states, self.norm2_g, self.norm2_b, self.norm2_eps) out_attn_2 = self.attn_2(out_norm_2, context=context) - out_norm_3, out_attn_2 = self.transformer_cuda_module.layer_norm_residual_store_pre_ln_res( + out_norm_3, out_attn_2 = self.layer_norm.layer_norm_residual_store_pre_ln_res( out_attn_2, self.attn_2_bias, out_attn_1, self.norm3_g, self.norm3_b, self.norm3_eps) out_ff1 = nn.functional.linear(out_norm_3, self.ff1_w) - out_geglu = self.transformer_cuda_module.gated_activation(out_ff1, self.ff1_b, ActivationFuncType.GATED_GELU) + out_geglu = self.gated_activation(out_ff1, self.ff1_b, ActivationFuncType.GATED_GELU) out_ff2 = nn.functional.linear(out_geglu, self.ff2_w) return nhwc_bias_add(out_ff2, self.ff2_b, other=out_attn_2) diff --git a/deepspeed/ops/transformer/inference/ds_attention.py b/deepspeed/ops/transformer/inference/ds_attention.py index ffb58175daad..fc3a4664ea2e 100644 --- a/deepspeed/ops/transformer/inference/ds_attention.py +++ b/deepspeed/ops/transformer/inference/ds_attention.py @@ -89,7 +89,7 @@ def __init__(self, config, mp_group=None, q_scales=None, q_groups=1, merge_count torch.empty(self.hidden_size_per_partition * 3, dtype=data_type_fp, device=device) ] - def compute_attention(self, qkv_out, input_mask, layer_past, alibi): + def compute_attention(self, qkv_out, input_mask, layer_past, alibi, is_prompt, token_idx, position_ids): if isinstance(qkv_out, list) or isinstance(qkv_out, tuple): qkv_out = qkv_out[0] @@ -108,7 +108,10 @@ def compute_attention(self, qkv_out, input_mask, layer_past, alibi): no_masking=no_masking, layer_id=self.config.layer_id, num_layers=DeepSpeedSelfAttention.num_layers, - alibi=alibi) + alibi=alibi, + is_prompt=is_prompt, + token_idx=token_idx, + position_ids=position_ids) context_layer, key_layer, value_layer = attn_key_value return context_layer, key_layer, value_layer @@ -136,7 +139,8 @@ def forward(self, output_attentions=False, norm_w=None, norm_b=None, - alibi=None): + alibi=None, + **kwargs): if self.attn_qkvw is None: self._attn_qkvw, self._attn_qkvb = self._merge_qkv() else: @@ -157,10 +161,17 @@ def forward(self, gamma=norm_w, beta=norm_b) + is_prompt = kwargs.get("first_token", qkv_out[0].shape[1] > 1) + token_idx = kwargs.get("token_idx", None) + position_ids = kwargs.get("position_ids", None) + context_layer, key_layer, value_layer = self.compute_attention(qkv_out=qkv_out, input_mask=input_mask, layer_past=layer_past, - alibi=alibi) + alibi=alibi, + is_prompt=is_prompt, + token_idx=token_idx, + position_ids=position_ids) output = 
self.vector_matmul_func(input=context_layer, weight=self.attn_ow) inp_norm = qkv_out[-1] @@ -210,7 +221,7 @@ def _split_tensor_along_last_dim(self, tensor, num_partitions, contiguous_split_ return tensor_list - def compute_attention(self, qkv_out, input_mask, layer_past, alibi): + def compute_attention(self, qkv_out, input_mask, layer_past, alibi, is_prompt, token_idx, position_ids): if isinstance(qkv_out, list) or isinstance(qkv_out, tuple): qkv_out = qkv_out[0] @@ -246,8 +257,10 @@ def compute_attention(self, qkv_out, input_mask, layer_past, alibi): matmul_result = torch.matmul(query_layer, key_layer) # change view to [batch_size, num_heads, q_length, k_length] attention_scores = matmul_result.view(output_size[0], output_size[1], output_size[2], -1) - - offset = dist.get_rank() * self.num_attention_heads_per_partition if dist.is_initialized() else 0 + if self.config.mp_size > 1 and dist.is_initialized(): + offset = dist.get_rank() * self.num_attention_heads_per_partition + else: + offset = 0 target_dtype = torch.float16 if self.config.dtype == torch.int8 else self.config.dtype # When using the hybrid engine with BLOOM, input_mask needs to be converted from torch.bool -> torch.int64 diff --git a/deepspeed/ops/transformer/inference/moe_inference.py b/deepspeed/ops/transformer/inference/moe_inference.py index 8766b65e866d..da3981c13714 100644 --- a/deepspeed/ops/transformer/inference/moe_inference.py +++ b/deepspeed/ops/transformer/inference/moe_inference.py @@ -7,16 +7,16 @@ import math import torch from torch.autograd import Function -# accelerator modules will be imported if needed -inference_module = None -specialized_mode = None import torch.nn as nn from .ds_attention import DeepSpeedSelfAttention from .config import DeepSpeedInferenceConfig +from .op_binding import SoftmaxOp, VectorMatMulOp, GELUGemmOp +from .op_binding.bias_residual import BiasResidualOp +from .op_binding.einsum_sec_sm_ecm import EinsumSecSmEcmOp +from .op_binding.layer_norm import LayerNormOp from ....moe.sharded_moe import TopKGate from deepspeed import comm as dist -from deepspeed.accelerator import get_accelerator -from deepspeed.ops.op_builder import InferenceBuilder +from .op_binding.moe_res_matmul import MoEResMatmulOp class DeepSpeedMoEInferenceConfig(DeepSpeedInferenceConfig): @@ -110,16 +110,13 @@ class DeepSpeedMLPFunction(Function): @staticmethod def forward(ctx, input, inter_w, inter_b, config, output_b, output_w, q_scales, q_groups, merge_count, mp_group, - async_op): + async_op, gelu_gemm_func, vector_matmul_func): if config.q_int8: - intermediate = inference_module.fused_gemm_gelu_int8(input, inter_w, inter_b, config.epsilon, q_scales[2], - (q_groups * (2**merge_count)), config.pre_layer_norm) - output = inference_module.vector_matmul_int8(intermediate, output_w, q_scales[3], q_groups, (merge_count)) + intermediate = gelu_gemm_func(input, inter_w, inter_b, config.epsilon, q_scales[2], + (q_groups * (2**merge_count)), config.pre_layer_norm) + output = vector_matmul_func(intermediate, output_w, q_scales[3], q_groups, (merge_count)) else: - mlp_gemm_func = inference_module.fused_gemm_gelu_fp16 if config.fp16 else \ - inference_module.fused_gemm_gelu_fp32 - - output = mlp_gemm_func(input, inter_w, inter_b, output_w, config.epsilon, config.pre_layer_norm, async_op) + output = gelu_gemm_func(input, inter_w, inter_b, output_w, config.epsilon, config.pre_layer_norm, async_op) if mp_group is not None and dist.get_world_size(group=mp_group) > 1: dist.all_reduce(output, group=mp_group, async_op=async_op) @@ 
-150,10 +147,13 @@ def __init__(self, config, q_scales=None, q_groups=1, merge_count=1, mlp_extra_g self.q_groups = q_groups * 2 if mlp_extra_grouping else q_groups self.merge_count = int(math.log2(merge_count)) self.mp_group = mp_group + self.gelu_gemm_func = GELUGemmOp(self.config) + self.vector_matmul_func = VectorMatMulOp(self.config) def forward(self, input, async_op=False): return DeepSpeedMLPFunction.apply(input, self.inter_w, self.inter_b, self.config, self.output_b, self.output_w, - self.q_scales, self.q_groups, self.merge_count, self.mp_group, async_op) + self.q_scales, self.q_groups, self.merge_count, self.mp_group, async_op, + self.gelu_gemm_func, self.vector_matmul_func) class DeepSpeedMoEInference(nn.Module): @@ -187,18 +187,7 @@ def __init__(self, self.config = config self.config.layer_id = DeepSpeedMoEInference.layer_id - global inference_module - global specialized_mode - if inference_module is None: - specialized_mode = False - # InferenceSpecializedBuilder is not among DeepSpeed provided builder yet, so we infer by builder name string - builder = get_accelerator().create_op_builder("InferenceSpecializedBuilder") - if builder is not None and builder.is_compatible(): - inference_module = builder.load() - specialized_mode = True - else: - inference_module = InferenceBuilder().load() - self.config.specialized_mode = specialized_mode + assert self.config.dtype != torch.bfloat16, "DeepSpeed MoE Transformer Inference not yet tested for bfloat support" DeepSpeedMoEInference.layer_id += 1 @@ -213,10 +202,8 @@ def __init__(self, self.res_mlp = DeepSpeedMoEMLP(config, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping, mp_group) self.res_coef = nn.Parameter(torch.Tensor(self.config.hidden_size, 2)) - self.coef_func = inference_module.softmax_fp16 if self.config.dtype in [torch.float16, torch.int8] else \ - inference_module.softmax_fp32 - self.vector_matmul_func = inference_module.vector_matmul_fp16 if self.config.dtype == torch.float16 else \ - inference_module.vector_matmul_fp32 + self.coef_func = SoftmaxOp(self.config) + self.vector_matmul_func = VectorMatMulOp(self.config) config.mp_size = 1 self.mlp = nn.ModuleList( @@ -234,12 +221,10 @@ def __init__(self, print("DeepSpeed MoE Transformer Inference config is ", self.config.__dict__) - self.bias_residual_func = inference_module.bias_residual_fp16 if self.config.dtype in [torch.float16, torch.int8] else \ - inference_module.bias_residual_fp32 - self.ds_layernorm = inference_module.layer_norm_fp16 if self.config.dtype in [torch.float16, torch.int8] else \ - inference_module.layer_norm_fp32 - self.einsum_sec_sm_ecm = inference_module.einsum_sec_sm_ecm_fp16 if self.config.dtype in [torch.float16, torch.int8] else \ - inference_module.einsum_sec_sm_ecm_fp32 + self.bias_residual_func = BiasResidualOp(self.config) + self.ds_layernorm = LayerNormOp(self.config) + self.einsum_sec_sm_ecm = EinsumSecSmEcmOp(self.config) + self.moe_res_matmul = MoEResMatmulOp(self.config) def res_coef_func(self, inp, async_op): inp = self.vector_matmul_func(inp, self.res_coef, async_op) @@ -346,7 +331,7 @@ def forward(self, dim=0)[dist.get_rank(group=self.expert_mp_group)] if self.config.mlp_type == 'residual': - inference_module.moe_res_matmul(res_mlp_out, res_coef_out, output) + self.moe_res_matmul(res_mlp_out, res_coef_out, output) output = self.bias_residual_func(output, residual_add, torch.empty(1)) diff --git a/deepspeed/ops/transformer/inference/op_binding/bias_add.py b/deepspeed/ops/transformer/inference/op_binding/bias_add.py new 
file mode 100644 index 000000000000..d2ae38f546eb --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/bias_add.py @@ -0,0 +1,31 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class BiasAddOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig): + super(BiasAddOp, self).__init__(config) + + try: + if self.config.dtype == torch.float16: + self.bias_add_func = self.inference_module.bias_add_fp16 + elif self.config.dtype == torch.bfloat16: + self.bias_add_func = self.inference_module.bias_add_bf16 + else: + self.bias_add_func = self.inference_module.bias_add_fp32 + except AttributeError: + self.bias_add_func = self.bias_add_fallback + + @classmethod + def bias_add_fallback(cls, input, bias): + return torch.add(input, bias) + + def forward(self, activation: torch.Tensor, bias: torch.Tensor): + return self.bias_add_func(activation, bias) diff --git a/deepspeed/ops/transformer/inference/op_binding/bias_gelu.py b/deepspeed/ops/transformer/inference/op_binding/bias_gelu.py new file mode 100644 index 000000000000..f0fee0b0d06e --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/bias_gelu.py @@ -0,0 +1,33 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import torch.nn.functional as F +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class BiasGeluOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig): + super(BiasGeluOp, self).__init__(config) + + try: + if self.config.dtype == torch.float16: + self.bias_gelu_func = self.inference_module.bias_gelu_fp16 + elif self.config.dtype == torch.bfloat16: + self.bias_gelu_func = self.inference_module.bias_gelu_bf16 + else: + self.bias_gelu_func = self.inference_module.bias_gelu_fp32 + except AttributeError: + self.bias_gelu_func = self.bias_gelu_fallback + + @classmethod + def bias_gelu_fallback(cls, activations, bias): + # Expected behavior is that of casting to float32 internally and using the tanh approximation + return F.gelu(activations.to(torch.float32) + bias.to(torch.float32), approximate='tanh').to(activations.dtype) + + def forward(self, activation: torch.Tensor, bias: torch.Tensor): + return self.bias_gelu_func(activation, bias) diff --git a/deepspeed/ops/transformer/inference/op_binding/bias_relu.py b/deepspeed/ops/transformer/inference/op_binding/bias_relu.py new file mode 100644 index 000000000000..ccfade1d9524 --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/bias_relu.py @@ -0,0 +1,33 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import torch.nn.functional as F +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class BiasReluOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig): + super(BiasReluOp, self).__init__(config) + + try: + if self.config.dtype == torch.float16: + self.bias_relu_func = self.inference_module.bias_relu_fp16 + elif self.config.dtype == torch.bfloat16: + self.bias_relu_func = self.inference_module.bias_relu_bf16 + else: + self.bias_relu_func = self.inference_module.bias_relu_fp32 + except AttributeError: + self.bias_relu_func = self.bias_relu_fallback + + @classmethod + def bias_relu_fallback(cls, activations, bias): + # Expected behavior is that of casting to float32 internally + return F.relu(activations.to(torch.float32) + bias.to(torch.float32)).to(activations.dtype) + + def forward(self, activation: torch.Tensor, bias: torch.Tensor): + return self.bias_relu_func(activation, bias) diff --git a/deepspeed/ops/transformer/inference/op_binding/bias_residual.py b/deepspeed/ops/transformer/inference/op_binding/bias_residual.py new file mode 100644 index 000000000000..ecad50e10ffe --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/bias_residual.py @@ -0,0 +1,29 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class BiasResidualOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig): + super(BiasResidualOp, self).__init__(config) + + try: + if self.config.dtype in [torch.float16, torch.int8]: + self.bias_residual_func = self.inference_module.bias_residual_fp16 + else: + self.bias_residual_func = self.inference_module.bias_residual_fp32 + except AttributeError: + self.bias_residual_func = self.bias_residual_fallback + + @classmethod + def bias_residual_fallback(cls, output, residual, bias): + raise NotImplementedError("bias residual fallback isn't implemented") + + def forward(self, output, residual, bias): + return self.bias_residual_func(output, residual, bias) diff --git a/deepspeed/ops/transformer/inference/op_binding/einsum_sec_sm_ecm.py b/deepspeed/ops/transformer/inference/op_binding/einsum_sec_sm_ecm.py new file mode 100644 index 000000000000..f34b10f786d1 --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/einsum_sec_sm_ecm.py @@ -0,0 +1,29 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class EinsumSecSmEcmOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig): + super(EinsumSecSmEcmOp, self).__init__(config) + + try: + if self.config.dtype in [torch.float16, torch.int8]: + self.einsum_sec_sm_ecm_func = self.inference_module.einsum_sec_sm_ecm_fp16 + else: + self.einsum_sec_sm_ecm_func = self.inference_module.einsum_sec_sm_ecm_fp32 + except AttributeError: + self.einsum_sec_sm_ecm_func = self.einsum_sec_sm_ecm_fallback + + @classmethod + def einsum_sec_sm_ecm_fallback(cls, Q, W): + raise NotImplementedError("einsum sec sm ecm fallback isn't implemented") + + def forward(self, Q, W): + return self.einsum_sec_sm_ecm_func(Q, W) diff --git a/deepspeed/ops/transformer/inference/op_binding/gated_activation.py b/deepspeed/ops/transformer/inference/op_binding/gated_activation.py new file mode 100644 index 000000000000..d28d818ce4b3 --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/gated_activation.py @@ -0,0 +1,40 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import torch.nn.functional as F +from deepspeed.utils.types import ActivationFuncType +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class GatedActivationOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig = None): + if config is None: + config = DeepSpeedInferenceConfig() + super(GatedActivationOp, self).__init__(config) + try: + self.gated_activation_func = self.inference_module.gated_activation + except AttributeError: + self.gated_activation_func = self.gated_activation_fallback + + @classmethod + def gated_activation_fallback(cls, activation, bias, activation_func_type): + # Expected behavior is that of casting to float32 internally + # Explicitly using the default GeLU + activation_func = None + activations = activation + bias.reshape(1, 1, -1) + hidden_states, gate = activations.chunk(2, dim=-1) + + if activation_func_type == ActivationFuncType.GATED_SILU: + activation_func = F.silu + elif activation_func_type == ActivationFuncType.GATED_GELU: + activation_func = F.gelu + + return hidden_states * activation_func(gate.to(torch.float32)).to(activations.dtype) + + def forward(self, activation: torch.Tensor, bias: torch.Tensor, activation_func_type: ActivationFuncType): + return self.gated_activation_func(activation, bias, activation_func_type) diff --git a/deepspeed/ops/transformer/inference/op_binding/gelu_gemm.py b/deepspeed/ops/transformer/inference/op_binding/gelu_gemm.py index 63323c150752..60bbb4b48bdb 100644 --- a/deepspeed/ops/transformer/inference/op_binding/gelu_gemm.py +++ b/deepspeed/ops/transformer/inference/op_binding/gelu_gemm.py @@ -4,6 +4,7 @@ # DeepSpeed Team import torch +import torch.nn.functional as F from ..config import DeepSpeedInferenceConfig from .base import BaseOp import deepspeed @@ -14,7 +15,9 @@ class GELUGemmOp(BaseOp): def __init__(self, config: DeepSpeedInferenceConfig): super(GELUGemmOp, self).__init__(config) try: - if self.config.dtype in [torch.float16, torch.int8]: + if self.config.dtype == torch.int8: + self.fused_gemm_gelu = self.inference_module.fused_gemm_gelu_int8 + elif self.config.dtype == torch.float16: if deepspeed.HAS_TRITON and self.config.use_triton and self.config.dtype == torch.float16: from deepspeed.ops.transformer.inference.triton.ops import fused_gemm_gelu as 
_triton_fused_gemm_gelu self.fused_gemm_gelu = _triton_fused_gemm_gelu # type: ignore @@ -28,7 +31,11 @@ def __init__(self, config: DeepSpeedInferenceConfig): self.fused_gemm_gelu = self.gelu_gemm_fallback def gelu_gemm_fallback(self, input, weight, scale, bias, out, out_scale, dtype, transpose): - raise NotImplementedError + tmp = torch.matmul(input, weight) + tmp = F.gelu(tmp.to(torch.float32) + bias.to(torch.float32), approximate="tanh").to(tmp.dtype) + output = torch.matmul(tmp, out) + + return output def forward(self, input: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, weight_out: torch.Tensor): diff --git a/deepspeed/ops/transformer/inference/op_binding/layer_norm.py b/deepspeed/ops/transformer/inference/op_binding/layer_norm.py new file mode 100644 index 000000000000..31219a58ac3c --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/layer_norm.py @@ -0,0 +1,60 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import torch.nn.functional as F +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class LayerNormOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig = None): + super(LayerNormOp, self).__init__(config) + try: + if config is None: + self.layer_norm_func = self.inference_module.layer_norm + elif self.config.dtype in [torch.float16, torch.int8]: + self.layer_norm_func = self.inference_module.layer_norm_fp16 + else: + self.layer_norm_func = self.inference_module.layer_norm_fp32 + except AttributeError: + self.layer_norm_func = self.layer_norm_fallback + + @classmethod + def layer_norm_residual(cls, vals, bias, res, gamma, beta, epsilon): + channels = gamma.shape[0] + dtype = gamma.dtype + vals_f = vals.to(torch.float32) + bias_f = bias.to(torch.float32).reshape(1, 1, -1) + res_f = res.to(torch.float32) + gamma_f = gamma.to(torch.float32) + beta_f = beta.to(torch.float32) + return F.layer_norm(vals_f + bias_f + res_f, (channels, ), weight=gamma_f, bias=beta_f, eps=epsilon).to(dtype) + + @classmethod + def layer_norm_residual_store_pre_ln_res(cls, vals, bias, res, gamma, beta, epsilon): + channels = gamma.shape[0] + dtype = gamma.dtype + vals_f = vals.to(torch.float32) + bias_f = bias.to(torch.float32).reshape(1, 1, -1) + res_f = res.to(torch.float32) + gamma_f = gamma.to(torch.float32) + beta_f = beta.to(torch.float32) + res_output = vals_f + bias_f + res_f + norm_output = F.layer_norm(res_output, (channels, ), weight=gamma_f, bias=beta_f, eps=epsilon).to(dtype) + return norm_output, res_output.to(dtype) + + @classmethod + def layer_norm_fallback(cls, vals, gamma, beta, epsilon): + channels = gamma.shape[0] + dtype = gamma.dtype + vals_f = vals.to(torch.float32) + gamma_f = gamma.to(torch.float32) + beta_f = beta.to(torch.float32) + return F.layer_norm(vals_f, (channels, ), weight=gamma_f, bias=beta_f, eps=epsilon).to(dtype) + + def forward(self, vals, gamma, beta, epsilon): + return self.layer_norm_func(vals, gamma, beta, epsilon) diff --git a/deepspeed/ops/transformer/inference/op_binding/mlp_gemm.py b/deepspeed/ops/transformer/inference/op_binding/mlp_gemm.py index 3064c00d1755..97daf8b74bd8 100644 --- a/deepspeed/ops/transformer/inference/op_binding/mlp_gemm.py +++ b/deepspeed/ops/transformer/inference/op_binding/mlp_gemm.py @@ -5,12 +5,12 @@ from typing import Optional -import os import torch import torch.nn.functional as F from ..config import DeepSpeedInferenceConfig from .base import BaseOp from deepspeed.utils.types import NormType +from 
.pre_rms_norm import PreRMSNormOp class MLPGemmOp(BaseOp): @@ -39,23 +39,46 @@ def __init__(self, config: DeepSpeedInferenceConfig): self.mlp_gemm_func = self.mlp_gemm_fallback elif self.config.norm_type == NormType.RMSNorm: self.mlp_gemm_func = self.rms_mlp_gemm_fallback + self.pre_rms_norm = PreRMSNormOp() def mlp_gemm_fallback(self, input, residual, input_bias, weight_interm, weight_out, bias, gamma, beta, eps, pre_layer_norm, mlp_after_attn, interm_scale, out_scale, dtype, mlp_act_func_type, transpose): - if os.environ.get('DS_KI_FALLBACK') == 'True' and mlp_after_attn and not transpose: - residual_add = F.layer_norm(input + residual + input_bias, (input.shape[2], ), gamma, beta, - self.config.epsilon) - tmp = torch.matmul(residual_add, weight_interm) + if mlp_after_attn: + residual_add = F.layer_norm(input + residual + input_bias, (input.shape[2], ), gamma, beta, eps) + tmp = torch.matmul(residual_add, weight_interm.t() if transpose else weight_interm) tmp = F.gelu(tmp + bias) - output = torch.matmul(tmp, weight_out) - return (output, residual_add) + output = torch.matmul(tmp, weight_out.t() if transpose else weight_out) + + return output, residual_add else: + # TODO: SW-151870 implement mlp_gemm_fallback raise NotImplementedError def rms_mlp_gemm_fallback(self, input, residual, weight_interm, weight_out, gamma, eps, interm_scale, out_scale, dtype, mlp_act_func_type, transpose): - raise NotImplementedError + inp_norm, residual = self.pre_rms_norm(input, residual, gamma, eps) + tmp = torch.matmul(inp_norm.view([-1, inp_norm.size(2)]), weight_interm.t() if transpose else weight_interm) + up_proj, gate_proj = tmp.chunk(2, dim=1) + + from deepspeed.utils.types import ActivationFuncType + if mlp_act_func_type == ActivationFuncType.GELU: + intermediate = F.gelu(gate_proj) + elif mlp_act_func_type == ActivationFuncType.ReLU: + intermediate = F.relu(gate_proj) + elif mlp_act_func_type == ActivationFuncType.GATED_GELU: + intermediate = F.gelu(gate_proj) + elif mlp_act_func_type == ActivationFuncType.GATED_SILU: + intermediate = F.silu(gate_proj) + else: + raise ValueError(f"rms_mlp_gemm_fallback not implemented for activation type {mlp_act_func_type}") + + intermediate = intermediate * up_proj + + output = torch.matmul(intermediate, weight_out.t() if transpose else weight_out) + output = output.view([input.size(0), input.size(1), -1]) + + return [output, residual] def forward(self, input: torch.Tensor, diff --git a/deepspeed/ops/transformer/inference/op_binding/moe_res_matmul.py b/deepspeed/ops/transformer/inference/op_binding/moe_res_matmul.py new file mode 100644 index 000000000000..ef3558c8bc88 --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/moe_res_matmul.py @@ -0,0 +1,29 @@ +# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class MoEResMatmulOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig = None): + if config is None: + config = DeepSpeedInferenceConfig() + super(MoEResMatmulOp, self).__init__(config) + try: + self.moe_res_matmul_func = self.inference_module.moe_res_matmul + except AttributeError: + self.moe_res_matmul_func = self.moe_res_matmul_fallback + + @classmethod + def moe_res_matmul_fallback(cls, residual, coef, output): + coef_t = coef.transpose(1, 2).contiguous() + coef1, coef2 = torch.split(coef_t, split_size_or_sections=coef_t.shape[len(coef_t.shape) - 1] // 2, dim=-1) + return residual * coef1 + output * coef2 + + def forward(self, residual, coef, output): + return self.moe_res_matmul_func(residual, coef, output) diff --git a/deepspeed/ops/transformer/inference/op_binding/pad_transform.py b/deepspeed/ops/transformer/inference/op_binding/pad_transform.py new file mode 100644 index 000000000000..876fefc3bcfb --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/pad_transform.py @@ -0,0 +1,26 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class PadTransformOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig = None): + if config is None: + config = DeepSpeedInferenceConfig() + super(PadTransformOp, self).__init__(config) + try: + self.pad_transform_func = self.inference_module.pad_transform_fp16 + except AttributeError: + self.pad_transform_func = self.pad_transform_fallback + + @staticmethod + def pad_transform_fallback(query, key, value, heads, do_flash_attn): + raise NotImplementedError("pad_transform fallback is not implemented.") + + def forward(self, query, key, value, heads, do_flash_attn): + return self.pad_transform_func(query, key, value, heads, do_flash_attn) diff --git a/deepspeed/ops/transformer/inference/op_binding/pre_rms_norm.py b/deepspeed/ops/transformer/inference/op_binding/pre_rms_norm.py new file mode 100644 index 000000000000..7969d20f0527 --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/pre_rms_norm.py @@ -0,0 +1,31 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp +from .rms_norm import RMSNormOp + + +class PreRMSNormOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig = None): + if config is None: + config = DeepSpeedInferenceConfig() + super(PreRMSNormOp, self).__init__(config) + try: + self.pre_rms_norm_func = self.inference_module.pre_rms_norm + except AttributeError: + self.pre_rms_norm_func = self.pre_rms_norm_fallback + + @staticmethod + def pre_rms_norm_fallback(vals, residual, gamma, epsilon): + residual = vals.to(torch.float32) + residual.to(torch.float32) + vals = residual + + return RMSNormOp.rms_norm_fallback(vals, gamma, epsilon), residual.to(gamma.dtype) + + def forward(self, vals, residual, gamma, epsilon): + return self.pre_rms_norm_func(vals, residual, gamma, epsilon) diff --git a/deepspeed/ops/transformer/inference/op_binding/qkv_gemm.py b/deepspeed/ops/transformer/inference/op_binding/qkv_gemm.py index 250bf9864e1e..9ff5366fae5d 100644 --- a/deepspeed/ops/transformer/inference/op_binding/qkv_gemm.py +++ b/deepspeed/ops/transformer/inference/op_binding/qkv_gemm.py @@ -3,11 +3,11 @@ # DeepSpeed Team -import os import torch import torch.nn.functional as F from ..config import DeepSpeedInferenceConfig from .base import BaseOp +from .rms_norm import RMSNormOp import deepspeed from deepspeed.utils.types import NormType @@ -56,19 +56,23 @@ def _triton_autotune(min_seqlen, max_seqlen, hidden_size, dtype=torch.float16): matmul(A, B) Fp16Matmul._update_autotune_table() - def qkv_gemm_fallback(self, input, weight, q_scale, bias, gamma, beta, eps, add_bias, q_int8, transpose): - if os.environ.get('DS_KI_FALLBACK') == 'True' and not transpose: - inp_norm = F.layer_norm(input, (input.shape[2], ), gamma, beta, eps) - tmp = torch.matmul(inp_norm, weight) - if add_bias: - tmp += bias - output = [tmp, inp_norm] - return output - else: - raise NotImplementedError + @staticmethod + def qkv_gemm_fallback(input, weight, q_scale, bias, gamma, beta, eps, add_bias, q_int8, transpose): + inp_norm = F.layer_norm(input, (input.shape[2], ), gamma, beta, eps) + tmp = torch.matmul(inp_norm, weight.t() if transpose else weight) + if add_bias: + tmp += bias + output = [tmp, inp_norm] + + return output + + @staticmethod + def rms_qkv_gemm_fallback(input, weight, q_scale, gamma, eps, q_int8, transpose): + inp_norm = RMSNormOp.rms_norm_fallback(input, gamma, eps) + tmp = torch.matmul(inp_norm, weight.t() if transpose else weight) + output = [tmp, inp_norm] - def rms_qkv_gemm_fallback(self, input, weight, q_scale, gamma, eps, q_int8, transpose): - raise NotImplementedError + return output def forward(self, input: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, gamma: torch.Tensor, beta: torch.Tensor): diff --git a/deepspeed/ops/transformer/inference/op_binding/residual_add.py b/deepspeed/ops/transformer/inference/op_binding/residual_add.py index 6f9b35cbc05d..93b229c5d1ac 100644 --- a/deepspeed/ops/transformer/inference/op_binding/residual_add.py +++ b/deepspeed/ops/transformer/inference/op_binding/residual_add.py @@ -3,9 +3,10 @@ # DeepSpeed Team -import os import torch from typing import Optional + +from .vector_add import VectorAddOp from ..config import DeepSpeedInferenceConfig from .base import BaseOp @@ -22,11 +23,32 @@ def __init__(self, config: DeepSpeedInferenceConfig): else: self.residual_add_func = self.inference_module.residual_add_bias_fp32 except AttributeError: - 
self.residual_add_func = None - try: - self._vector_add = self.inference_module._vector_add - except AttributeError: - self._vector_add = None + self.residual_add_func = self.residual_add_fallback + self.vector_add = VectorAddOp() + + @staticmethod + def res_add_bias(hidden_state, residual, attn_output, attn_bias, final_bias, add_attn_bias, mp_size): + hidden_state += attn_output + (residual + final_bias) / mp_size + if add_attn_bias: + hidden_state += attn_bias / mp_size + + return hidden_state + + @staticmethod + def residual_add_fallback(hidden_state, residual, attention_output, attention_bias, final_bias, mp_size, + mlp_after_attn, add_bias, pre_layer_norm): + if mlp_after_attn: + if pre_layer_norm: + tmp = (residual.float() + attention_output.float() + attention_bias.float() + + final_bias.float()) / mp_size + hidden_state.float() + else: + tmp = residual.float() + hidden_state.float() + final_bias.float() + else: + tmp = ResidualAddOp.res_add_bias(hidden_state, residual, attention_output, attention_bias, final_bias, + add_bias, mp_size) + residual.copy_(tmp.to(hidden_state.dtype)) + + return residual def forward(self, hidden_state: torch.Tensor, @@ -37,28 +59,15 @@ def forward(self, attention_bias: Optional[torch.Tensor] = None, final_bias: Optional[torch.Tensor] = None): - if self.residual_add_func is not None: - if final_bias is None: - residual = self._vector_add(residual, hidden_state, 1.0 / self.config.mp_size) - else: - if not self.config.pre_layer_norm and residual_add is not None: - # only use residual add if its set and we are not pre layer norm - residual = residual_add - - self.residual_add_func(hidden_state, residual, attention_output, attention_bias, final_bias, - self.config.mp_size, self.config.mlp_after_attn, add_bias, - self.config.pre_layer_norm) + if final_bias is None and attention_bias is None: + residual = self.vector_add(residual + attention_output, hidden_state, 1.0 / self.config.mp_size) else: - # fallback - if os.environ.get('DS_KI_FALLBACK') == 'True' and self.config.mlp_after_attn: - if self.config.pre_layer_norm: - tmp = (residual.float() + attention_output.float() + attention_bias.float() + - final_bias.float()) / self.config.mp_size + hidden_state.float() - else: - tmp = residual.float() + hidden_state.float() + final_bias.float() + if not self.config.pre_layer_norm and residual_add is not None: + # only use residual add if its set and we are not pre layer norm + residual = residual_add + + self.residual_add_func(hidden_state, residual, attention_output, attention_bias, final_bias, + self.config.mp_size, self.config.mlp_after_attn, add_bias, + self.config.pre_layer_norm) - input_dtype = hidden_state.dtype - residual = tmp.to(input_dtype) - else: - raise NotImplementedError return residual diff --git a/deepspeed/ops/transformer/inference/op_binding/rms_norm.py b/deepspeed/ops/transformer/inference/op_binding/rms_norm.py new file mode 100644 index 000000000000..128883ce5d43 --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/rms_norm.py @@ -0,0 +1,33 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class RMSNormOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig = None): + if config is None: + config = DeepSpeedInferenceConfig() + super(RMSNormOp, self).__init__(config) + try: + self.rms_norm_func = self.inference_module.rms_norm + except AttributeError: + self.rms_norm_func = self.rms_norm_fallback + + @staticmethod + def rms_norm_fallback(vals, gamma, epsilon): + variance = vals.to(torch.float32).pow(2).mean(-1, keepdim=True) + vals = vals * torch.rsqrt(variance + epsilon) + + if gamma.dtype in [torch.float16, torch.bfloat16]: + vals = vals.to(gamma.dtype) + + return gamma * vals + + def forward(self, vals, gamma, epsilon): + return self.rms_norm_func(vals, gamma, epsilon) diff --git a/deepspeed/ops/transformer/inference/op_binding/softmax.py b/deepspeed/ops/transformer/inference/op_binding/softmax.py index bc309d94df14..b408883d5cfd 100644 --- a/deepspeed/ops/transformer/inference/op_binding/softmax.py +++ b/deepspeed/ops/transformer/inference/op_binding/softmax.py @@ -3,11 +3,11 @@ # DeepSpeed Team -import os import torch import torch.nn.functional as F from ..config import DeepSpeedInferenceConfig from .base import BaseOp +from deepspeed.ops.transformer.inference.op_binding.workspace import InferenceContext class SoftmaxOp(BaseOp): @@ -25,24 +25,45 @@ def __init__(self, config: DeepSpeedInferenceConfig): except AttributeError: self.softmax_func = self.softmax_fallback - def softmax_fallback(self, attn_scores, attn_mask, alibi, triangular, recompute, local_attention, window_size, - async_op, layer_scale, head_offset, mp_size): - if os.environ.get('DS_KI_FALLBACK') == 'True': - alibi = alibi[head_offset:head_offset + self.num_attention_heads_per_partition] - input_dtype = attn_scores.dtype - if (triangular): - tri = ~torch.tril(torch.ones(attn_scores.size(), device=attn_scores.device)).to(bool) - attn_scores = torch.masked_fill(attn_scores * layer_scale, tri, torch.finfo(input_dtype).min) - if alibi is not None: - attn_scores += alibi - if attn_mask is not None: - # expand atten_mask from two dim into 4 dim, insert two dims in the middle + @staticmethod + def softmax_fallback(attn_scores, attn_mask, alibi, triangular, recompute, local_attention, window_size, async_op, + layer_scale, head_offset, mp_size): + scores_len = len(attn_scores.size()) + heads = 1 + if scores_len > 1: + heads = attn_scores.size()[1] + num_attention_heads_per_partition = heads // mp_size + + if alibi is not None: + if len(alibi.shape) == 1: + alibi = None + else: + alibi = alibi[head_offset:head_offset + num_attention_heads_per_partition] + if attn_mask is not None and len(attn_mask.shape) == 1: + attn_mask = None + input_dtype = attn_scores.dtype + attn_scores *= layer_scale + + if alibi is not None: + attn_scores += alibi + if attn_mask is not None: + # expand atten_mask from two dim into 4 dim, insert two dims in the middle + if len(attn_mask.shape) == 2: + # The above if statement was added because the mask was already 4D so this + # expansion should be avoided as it expands to 6D and crashes later (in bloom + # HE KI FB) attn_mask = attn_mask[:, None, None, :] - attn_scores += attn_mask - output = F.softmax(attn_scores, dim=-1, dtype=torch.float32).to(input_dtype) - return output - else: - raise NotImplementedError + attn_scores += attn_mask + if triangular: + if attn_scores.shape[2] == 1: # query using kv cache + token_idx = 
InferenceContext.Instance().current_tokens() + tri = torch.arange(attn_scores.shape[2], device=attn_scores.device).ge(token_idx) + else: + tri = ~torch.tril(torch.ones(attn_scores.size(), device=attn_scores.device)).to(bool) + attn_scores = torch.masked_fill(attn_scores, tri, float('-inf')) + output = F.softmax(attn_scores, dim=-1, dtype=torch.float32).to(input_dtype) + + return output def forward(self, attn_scores: torch.Tensor, attn_mask: torch.Tensor, alibi: torch.Tensor, triangular: bool, recompute: bool, local_attention: bool, window_size: int, async_op: bool, layer_scale: float, diff --git a/deepspeed/ops/transformer/inference/op_binding/softmax_context.py b/deepspeed/ops/transformer/inference/op_binding/softmax_context.py index 0dc4e08a3633..ccb0126922f5 100644 --- a/deepspeed/ops/transformer/inference/op_binding/softmax_context.py +++ b/deepspeed/ops/transformer/inference/op_binding/softmax_context.py @@ -7,6 +7,8 @@ from deepspeed import comm as dist from ..config import DeepSpeedInferenceConfig from .base import BaseOp +from .softmax import SoftmaxOp +from deepspeed.ops.transformer.inference.op_binding.workspace import InferenceContext class SoftmaxContextOp(BaseOp): @@ -23,13 +25,109 @@ def __init__(self, config: DeepSpeedInferenceConfig): except AttributeError: self.softmax_context_func = self.softmax_context_fallback + @staticmethod + def transform4d_0213(x, seq_length): + assert x.dim() == 3, F"Dim {x.dim()} is not supported" + batch_size, num_heads, seq_length_head_dim = x.shape + head_dim = seq_length_head_dim // seq_length + x = x.view(batch_size, num_heads, seq_length, head_dim) + x = x.permute(0, 2, 1, 3) + + return x + + @staticmethod + def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: + batch, num_key_value_heads, slen, head_dim = hidden_states.shape + if n_rep <= 1 or num_key_value_heads == 1: + return hidden_states + + hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) + + return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) + + @staticmethod + def bias_add_transform_0213(input, bias, num_heads, trans_count, perform_bias=False): + assert trans_count == 1 or trans_count == 3, F"Trans count {trans_count} is not supported" + assert input.dim() == 3, F"Dim {input.dim()} is not supported" + input_biased = torch.add(input, bias) if perform_bias else input + batch_size, seq_length, value_size = input_biased.shape + hid_dim = value_size // trans_count + head_dim = hid_dim // num_heads + + if trans_count == 1: + query_layer = input.view(batch_size, seq_length, num_heads, head_dim) + query_layer = query_layer.permute(0, 2, 1, 3) + key_layer = torch.zeros_like(query_layer) + value_layer = torch.zeros_like(query_layer) + return query_layer, key_layer, value_layer + + qkv_layers = input.view(batch_size, seq_length, 3, num_heads, head_dim) + query_layer, key_layer, value_layer = qkv_layers[..., 0, :, :], qkv_layers[..., 1, :, :], qkv_layers[..., + 2, :, :] + query_layer = query_layer.transpose(1, 2) + key_layer = key_layer.transpose(1, 2) + value_layer = value_layer.transpose(1, 2) + + return query_layer, key_layer, value_layer + def softmax_context_fallback(self, query_key_value, attn_mask, rotary_dim, rotate_half, rotate_every_two, heads, num_kv, norm_factor, triangular_masking, local_attention, window_size, no_masking, - layer_id, num_layers, alibi, rope_theta): - raise NotImplementedError + layer_id, num_layers, alibi, rope_theta, is_prompt, token_idx, position_ids): + 
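+        # The pure-PyTorch fallback below mirrors the fused kernel: it splits the fused QKV projection
+        # into per-head query/key/value tensors (bias_add_transform_0213), optionally applies rotary
+        # position embeddings via transformers' apply_rotary_pos_emb, writes the new key/value slices
+        # into the InferenceContext KV cache (update_cache), expands them for grouped-query attention
+        # (repeat_kv), computes the scaled QK^T scores, masks and normalizes them with
+        # SoftmaxOp.softmax_fallback, applies them to the values, and reshapes the result back to
+        # [batch, seq, hidden]. The shared token counter is advanced once per step, after the last layer.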
bat_0213_query, bat_0213_key, bat_0213_value = self.bias_add_transform_0213( + query_key_value, None, heads, 3, False) + + if rotary_dim > 0 and rotate_half: + from transformers.models.llama.modeling_llama import apply_rotary_pos_emb + + rotary = InferenceContext.Instance().get_rotary(rotary_dim, rope_theta, bat_0213_value.device) + cos, sin = rotary(bat_0213_value, InferenceContext.Instance().get_max_tokens_num()) + # TODO: SW-170999 Optimize RoPE implementation. + bat_0213_query, bat_0213_key = apply_rotary_pos_emb(bat_0213_query, bat_0213_key, cos, sin, position_ids) + + bat_0213_key, bat_0213_value = InferenceContext.Instance().update_cache(layer_id, token_idx, is_prompt, + bat_0213_key, bat_0213_value) + + bat_0213_key = self.repeat_kv(bat_0213_key, num_kv) + bat_0213_value = self.repeat_kv(bat_0213_value, num_kv) + + bsz = query_key_value.shape[0] + head_dim = query_key_value.shape[2] // (heads * 3) + + bmm_output = torch.bmm(bat_0213_query.reshape(bsz * heads, bat_0213_query.shape[2], head_dim), + bat_0213_key.reshape(bsz * heads, bat_0213_key.shape[2], head_dim).transpose(1, 2)) + + layer_scale = 1.0 + if alibi is not None and len(alibi.shape) > 1: + layer_scale = max(1, layer_id).to(float) + + alpha = norm_factor * norm_factor / layer_scale + bmm_output *= alpha + bmm_output_reshape = bmm_output.reshape(bsz, heads, bmm_output.shape[1], bmm_output.shape[2]) + + recompute = is_prompt + if attn_mask is not None and len(attn_mask.shape) > 1 and attn_mask.shape[-1] < bmm_output_reshape.shape[3]: + attn_mask = torch.nn.functional.pad(attn_mask, (0, bmm_output_reshape.shape[3] - attn_mask.shape[-1]), + value=torch.finfo(attn_mask.dtype).min) + softmax_output = SoftmaxOp.softmax_fallback(bmm_output_reshape, attn_mask, alibi, triangular_masking, + recompute, local_attention, window_size, None, layer_scale, 0, 1) + + output = torch.bmm(softmax_output.reshape(bsz * heads, softmax_output.shape[2], softmax_output.shape[3]), + bat_0213_value.reshape(bsz * heads, bat_0213_value.shape[2], head_dim)) + + output = output.reshape(bsz, heads, output.shape[1], head_dim) + output = output.reshape(bsz, heads, output.shape[2] * head_dim) + input_seq_len = query_key_value.shape[1] + t4d_0123_output = self.transform4d_0213(output, input_seq_len) + t4d_0123_output = t4d_0123_output.reshape(bsz, t4d_0123_output.shape[1], heads * head_dim) + + if layer_id == num_layers - 1: + InferenceContext.Instance().advance_tokens() + + return t4d_0123_output, bat_0213_key, bat_0213_value def forward(self, query_key_value: torch.Tensor, attn_mask: torch.Tensor, heads: int, num_kv: int, - norm_factor: float, no_masking: bool, layer_id: int, num_layers: int, alibi: torch.Tensor): + norm_factor: float, no_masking: bool, layer_id: int, num_layers: int, alibi: torch.Tensor, + is_prompt: bool, token_idx: torch.Tensor, position_ids: torch.Tensor): if alibi is not None: batch_heads = query_key_value.shape[0] * heads @@ -42,6 +140,6 @@ def forward(self, query_key_value: torch.Tensor, attn_mask: torch.Tensor, heads: self.config.rotate_every_two, heads, num_kv, norm_factor, self.config.triangular_masking, self.config.local_attention, self.config.window_size, no_masking, layer_id, num_layers, alibi, - self.config.rope_theta) + self.config.rope_theta, is_prompt, token_idx, position_ids) return output diff --git a/deepspeed/ops/transformer/inference/op_binding/vector_add.py b/deepspeed/ops/transformer/inference/op_binding/vector_add.py new file mode 100644 index 000000000000..015340a1084b --- /dev/null +++ 
b/deepspeed/ops/transformer/inference/op_binding/vector_add.py @@ -0,0 +1,28 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class VectorAddOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig = None): + if config is None: + config = DeepSpeedInferenceConfig() + super(VectorAddOp, self).__init__(config) + try: + self.vector_add_func = self.inference_module._vector_add + except AttributeError: + self.vector_add_func = self.vector_add_fallback + + @classmethod + def vector_add_fallback(cls, a, b, gamma): + """Based on csrc/transformer/inference/csrc/pt_binding.cpp code of _vector_add""" + dtype = a.dtype + return (gamma * a.float() + b.float()).to(dtype) + + def forward(self, a, b, gamma): + return self.vector_add_func(a, b, gamma) diff --git a/deepspeed/ops/transformer/inference/op_binding/vector_matmul.py b/deepspeed/ops/transformer/inference/op_binding/vector_matmul.py index 011be859634d..cabab8d8c4ab 100644 --- a/deepspeed/ops/transformer/inference/op_binding/vector_matmul.py +++ b/deepspeed/ops/transformer/inference/op_binding/vector_matmul.py @@ -3,7 +3,6 @@ # DeepSpeed Team -import os import torch from ..config import DeepSpeedInferenceConfig from .base import BaseOp @@ -25,7 +24,7 @@ def __init__(self, config: DeepSpeedInferenceConfig): else: self.vector_matmul_func = self.inference_module.vector_matmul_fp16 elif self.config.dtype == torch.int8: - self.vector_matmul_func = self.inference_module.vector_matmul_fp16 + self.vector_matmul_func = self.inference_module.vector_matmul_int8 elif self.config.dtype == torch.bfloat16: self.vector_matmul_func = self.inference_module.vector_matmul_bf16 else: @@ -34,10 +33,7 @@ def __init__(self, config: DeepSpeedInferenceConfig): self.vector_matmul_func = self.vector_matmul_fallback def vector_matmul_fallback(self, input, weight, async_op, q_scale, q_int8, transpose): - if os.environ.get('DS_KI_FALLBACK') == 'True' and not transpose: - return torch.matmul(input, weight) - else: - raise NotImplementedError + return torch.matmul(input, weight.t() if transpose else weight) def forward(self, input: torch.Tensor, weight: torch.Tensor, async_op: bool = False): q_scale = weight.scale if hasattr(weight, 'scale') else torch.empty(1) diff --git a/deepspeed/ops/transformer/inference/op_binding/workspace.py b/deepspeed/ops/transformer/inference/op_binding/workspace.py new file mode 100644 index 000000000000..19de7d9576af --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/workspace.py @@ -0,0 +1,222 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + +minus_inf = -10000.0 +key_idx = 0 +value_idx = 1 + + +class InferenceContext: + + __instance = None + + def __init__(self): + self.kv_cache = None + self.kv_cache_elem_dtype = None + self.num_tokens = 1 + self.kv_cache_num_layers = None + self.kv_cache_size = None + self.max_out_tokens = None + self.rotary = None + self.allocate_called = False + self.static_shapes = True + + @classmethod + def Instance(cls): + if InferenceContext.__instance is None: + InferenceContext.__instance = InferenceContext() + return InferenceContext.__instance + + def gen_workspace(self, num_layers, num_heads, batch_size, prompt_len, hidden_dim, mp_size, external_cache, + elem_dtype, rank, max_out_tokens, min_out_tokens): + self.allocate_called = True + self.kv_cache = None + if not external_cache: + self.kv_cache_num_layers = num_layers + self.max_out_tokens = max_out_tokens + head_size = hidden_dim // num_heads + self.kv_cache_size = torch.Size([batch_size, (num_heads // mp_size), max_out_tokens, head_size]) + self.kv_cache_elem_dtype = elem_dtype + self.num_tokens = 0 + self.static_shapes = True + return True + + def retake_workspace(self): + return True + + def _retake_workspace(self): + assert self.allocate_called, "retake workspace called before allocate workspace" + + import deepspeed.accelerator as accelerator + if self.kv_cache is None: + self.kv_cache = [] + for layer in range(self.kv_cache_num_layers): + self.kv_cache.append((torch.zeros(self.kv_cache_size, + dtype=self.kv_cache_elem_dtype, + device=accelerator.get_accelerator().device_name()), + torch.zeros(self.kv_cache_size, + dtype=self.kv_cache_elem_dtype, + device=accelerator.get_accelerator().device_name()))) + + return True + + def update_cache(self, layer_id, token_idx, is_prompt, bat_0213_key, bat_0213_value): + has_workspace = self._retake_workspace() + assert has_workspace, "Could not allocate workspace" + + # Update current token + if is_prompt: + self.static_shapes = True + if token_idx is None: + self.static_shapes = False + InferenceContext.Instance().reset_tokens(bat_0213_key.shape[2]) + else: + InferenceContext.Instance().reset_tokens(token_idx) + + if token_idx is None: + token_idx = InferenceContext.Instance().current_tokens() + + bsz = bat_0213_key.shape[0] + + # Update cache content + if is_prompt: + cache_max_seq = self.kv_cache_size[2] + cache_max_head_dim = self.kv_cache_size[3] + seq = bat_0213_key.shape[2] + + mask = torch.arange(cache_max_seq, device=bat_0213_key.device) + mask = mask.ge(token_idx) + mask = mask.unsqueeze(-1) + mask = mask.expand([cache_max_seq, cache_max_head_dim]) + + self.kv_cache[layer_id][key_idx][:bsz, :, :seq, :].copy_(bat_0213_key) + self.kv_cache[layer_id][key_idx][:bsz, :].masked_fill_(mask, 0) + self.kv_cache[layer_id][value_idx][:bsz, :, :seq, :].copy_(bat_0213_value) + self.kv_cache[layer_id][value_idx][:bsz, :].masked_fill_(mask, 0) + else: + if self.static_shapes: + assert type(token_idx) == torch.Tensor, "token_idx is expected to be torch.Tensor" + self.kv_cache[layer_id][key_idx][:bsz].index_copy_(2, token_idx - 1, bat_0213_key) + self.kv_cache[layer_id][value_idx][:bsz].index_copy_(2, token_idx - 1, bat_0213_value) + else: + assert type(token_idx) == int, "token_idx is expected to be int" + self.kv_cache[layer_id][key_idx][:bsz, :, token_idx - 1:token_idx, :] = bat_0213_key + self.kv_cache[layer_id][value_idx][:bsz, :, token_idx - 1:token_idx, 
:] = bat_0213_value + + bat_0213_key = self.kv_cache[layer_id][key_idx][:bsz] + bat_0213_value = self.kv_cache[layer_id][value_idx][:bsz] + + if not self.static_shapes: + bat_0213_key = bat_0213_key[:, :, :token_idx, :] + bat_0213_value = bat_0213_value[:, :, :token_idx, :] + + return bat_0213_key, bat_0213_value + + def release_workspace(self): + self.kv_cache = None + self.rotary = None + + def reset_tokens(self, initial_tokens=1): + self.num_tokens = initial_tokens + + def current_tokens(self): + return self.num_tokens + + def advance_tokens(self): + self.num_tokens = self.num_tokens + 1 + + def get_kv_cache(self): + return self.kv_cache + + def get_rotary(self, rotary_dim, rope_theta, device=None): + if self.rotary is None: + from transformers.models.llama.modeling_llama import LlamaRotaryEmbedding + + self.rotary = LlamaRotaryEmbedding(rotary_dim, base=rope_theta, device=device) + + return self.rotary + + def get_max_tokens_num(self): + return self.max_out_tokens + + +class WorkspaceOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig = None): + if config is None: + config = DeepSpeedInferenceConfig() + self.inference_context = InferenceContext.Instance() + self._is_allocated = False + try: + super(WorkspaceOp, self).__init__(config) + if config.dtype == torch.float32: + self.allocate_workspace_func = self.inference_module.allocate_workspace_fp32 + elif config.dtype == torch.bfloat16: + self.allocate_workspace_func = self.inference_module.allocate_workspace_bf16 + else: + self.allocate_workspace_func = self.inference_module.allocate_workspace_fp16 + self.release_workspace_func = self.inference_module.release_workspace + self.retake_workspace_func = self.inference_module.retake_workspace + self.reset_cache_func = self.inference_module.reset_cache + except (ValueError, AttributeError) as e: + print(f"Using fallback functions in workspace because of {e}") + if config.dtype == torch.float32: + self.allocate_workspace_func = self.allocate_workspace_fp32_fallback + elif config.dtype == torch.bfloat16: + self.allocate_workspace_func = self.allocate_workspace_bf16_fallback + else: + self.allocate_workspace_func = self.allocate_workspace_fp16_fallback + self.release_workspace_func = self.release_workspace_fallback + self.retake_workspace_func = self.retake_workspace_fallback + self.reset_cache_func = self.reset_cache_fallback + + def allocate_workspace(self, *args, **kwargs): + self._is_allocated = True + return self.allocate_workspace_func(*args, **kwargs) + + def release_workspace(self): + self._is_allocated = False + return self.release_workspace_func() + + def reset_cache(self): + return self.reset_cache_func() if self.reset_cache_func else None + + def retake_workspace(self): + return self.retake_workspace_func() if self.retake_workspace_func else None + + def allocate_workspace_fp32_fallback(self, hidden_dim, num_heads, prompt_length, batch_size, num_layers, mp_size, + external_cache, rank, max_out_tokens, min_out_tokens): + return self.inference_context.gen_workspace(num_layers, num_heads, batch_size, prompt_length, hidden_dim, + mp_size, external_cache, torch.float, rank, max_out_tokens, + min_out_tokens) + + def allocate_workspace_bf16_fallback(self, hidden_dim, num_heads, prompt_length, batch_size, num_layers, mp_size, + external_cache, rank, max_out_tokens, min_out_tokens): + return self.inference_context.gen_workspace(num_layers, num_heads, batch_size, prompt_length, hidden_dim, + mp_size, external_cache, torch.bfloat16, rank, max_out_tokens, + min_out_tokens) + + 
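+    # The allocate_workspace_*_fallback methods differ only in the element dtype handed to
+    # InferenceContext.gen_workspace; gen_workspace records the KV-cache geometry, and the per-layer
+    # (key, value) tensors are allocated lazily by _retake_workspace() on the first update_cache() call.
+    # Illustrative sketch (illustrative only, using the gen_workspace signature defined above) of
+    # driving the fallback path directly:
+    #   ctx = InferenceContext.Instance()
+    #   ctx.gen_workspace(num_layers=2, num_heads=8, batch_size=1, prompt_len=16, hidden_dim=512,
+    #                     mp_size=1, external_cache=False, elem_dtype=torch.bfloat16, rank=0,
+    #                     max_out_tokens=128, min_out_tokens=1)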
def allocate_workspace_fp16_fallback(self, hidden_dim, num_heads, prompt_length, batch_size, num_layers, mp_size, + external_cache, rank, max_out_tokens, min_out_tokens): + return self.inference_context.gen_workspace(num_layers, num_heads, batch_size, prompt_length, hidden_dim, + mp_size, external_cache, torch.half, rank, max_out_tokens, + min_out_tokens) + + def reset_cache_fallback(self): + return self.inference_context.reset_tokens() + + def release_workspace_fallback(self): + return self.inference_context.release_workspace() + + def retake_workspace_fallback(self): + return self.inference_context.retake_workspace() + + def is_allocated(self): + return self._is_allocated diff --git a/deepspeed/ops/transformer/inference/triton/attention.py b/deepspeed/ops/transformer/inference/triton/attention.py index c05370ec74e5..6845d91b06be 100644 --- a/deepspeed/ops/transformer/inference/triton/attention.py +++ b/deepspeed/ops/transformer/inference/triton/attention.py @@ -125,7 +125,7 @@ def _triton_autotune(min_seqlen, context_4d_matmul(output, qkv, head_size) Fp16Matmul._update_autotune_table() - def ds_compute_attention(self, qkv_out, input_mask, layer_past, alibi): + def ds_compute_attention(self, qkv_out, input_mask, layer_past, alibi, is_prompt, token_idx, position_ids): if isinstance(qkv_out, list): qkv_out = qkv_out[0] @@ -143,7 +143,10 @@ def ds_compute_attention(self, qkv_out, input_mask, layer_past, alibi): no_masking=no_masking, layer_id=self.config.layer_id, num_layers=TritonSelfAttention.num_layers, - alibi=alibi) + alibi=alibi, + is_prompt=is_prompt, + token_idx=token_idx, + position_ids=position_ids) context_layer, key_layer, value_layer = attn_key_value return context_layer, key_layer, value_layer @@ -161,7 +164,8 @@ def forward( norm_w=None, norm_b=None, alibi=None, - use_triton_attention=True): + use_triton_attention=True, + **kwargs): if not self.config.pre_layer_norm: qkv_out = self.linear_func(input=input, @@ -192,10 +196,16 @@ def forward( triangular=self.triangular_masking) key_layer, value_layer = qkv[:, :, self.hidden_size:2 * self.hidden_size], qkv[:, :, 2 * self.hidden_size:] else: + is_prompt = kwargs.get("first_token", qkv_out[0].shape[1] > 1) + token_idx = kwargs.get("token_idx", None) + position_ids = kwargs.get("position_ids", None) context_layer, key_layer, value_layer = self.ds_compute_attention(qkv_out=qkv_out, input_mask=input_mask, layer_past=layer_past, - alibi=alibi) + alibi=alibi, + is_prompt=is_prompt, + token_idx=token_idx, + position_ids=position_ids) output = self.vector_matmul_func(input=context_layer, weight=self.attn_ow) inp_norm = qkv_out[-1] diff --git a/deepspeed/ops/transformer/inference/triton/ops.py b/deepspeed/ops/transformer/inference/triton/ops.py index dd87d08d4d2c..dbed45313780 100644 --- a/deepspeed/ops/transformer/inference/triton/ops.py +++ b/deepspeed/ops/transformer/inference/triton/ops.py @@ -3,12 +3,10 @@ # DeepSpeed Team -import deepspeed -from deepspeed.ops.op_builder import InferenceBuilder import deepspeed.ops.transformer.inference.triton.matmul_ext as matmul_ext +from deepspeed.ops.transformer.inference.op_binding.layer_norm import LayerNormOp from deepspeed.ops.transformer.inference.triton.layer_norm import layer_norm, layer_norm_residual - -inference_module = None +from deepspeed.utils.types import ActivationFuncType def vector_matmul_func(input, weight, async_op, q_scale, q_int8, transposed_mode): @@ -76,15 +74,12 @@ def mlp_gemm_func(input, if use_triton_ln: mlp_input = layer_norm_residual(input, input_bias, residual, gamma,
beta, epsilon) else: - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - mlp_input = inference_module._layer_norm_residual(input, input_bias, residual, gamma, beta, epsilon) + mlp_input = LayerNormOp.layer_norm_residual(input, input_bias, residual, gamma, beta, epsilon) # activation - if deepspeed.utils.types.ActivationFuncType(mlp_act_func_type) == deepspeed.utils.types.ActivationFuncType.GELU: + if ActivationFuncType(mlp_act_func_type) == ActivationFuncType.GELU: activation = "gelu" - elif deepspeed.utils.types.ActivationFuncType(mlp_act_func_type) == deepspeed.utils.types.ActivationFuncType.ReLU: + elif ActivationFuncType(mlp_act_func_type) == ActivationFuncType.ReLU: activation = "relu" else: activation = "" @@ -121,10 +116,7 @@ def qkv_gemm_func( if use_triton_ln: qkv_input = layer_norm(input, gamma, beta, epsilon) else: - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - qkv_input = inference_module.layer_norm(input, gamma, beta, epsilon) + qkv_input = LayerNormOp()(input, gamma, beta, epsilon) qkv_out = matmul_ext.matmul(qkv_input, weight, bias=(bias if add_bias else None), activation="", use_triton=True) diff --git a/deepspeed/runtime/activation_checkpointing/checkpointing.py b/deepspeed/runtime/activation_checkpointing/checkpointing.py index 529931ca0df1..83c2519b3f0e 100644 --- a/deepspeed/runtime/activation_checkpointing/checkpointing.py +++ b/deepspeed/runtime/activation_checkpointing/checkpointing.py @@ -30,14 +30,17 @@ from deepspeed.utils.bwc import bwc_tensor_model_parallel_rank from deepspeed.accelerator import get_accelerator from deepspeed.runtime import compiler +import os # DeepSpeed Checkpointing Enabled or Disabled deepspeed_checkpointing_enabled = False # MP parameters mpu = None -mp_rank = None -mp_size = None + +#set default values +mp_rank = 0 +mp_size = 1 mp_group = None # Model Parameters @@ -61,8 +64,6 @@ # Default name for the model parallel rng tracker. 
_MODEL_PARALLEL_RNG_TRACKER_NAME = 'model-parallel-rng' -transport_stream = None -cuda_device = None def detach_variable(inputs, device=None): @@ -518,35 +519,10 @@ def save_args_for_backward(*all_args): global mp_rank, mp_size, mp_group global contiguous_data_buffers, contiguous_size_buffers global data_offsets, size_offsets - if mp_rank is None: - if mpu is not None: - if hasattr(mpu, 'get_tensor_model_parallel_rank'): - mp_rank = mpu.get_tensor_model_parallel_rank() - mp_size = mpu.get_tensor_model_parallel_world_size() - mp_group = mpu.get_tensor_model_parallel_group() - else: - mp_rank = mpu.get_model_parallel_rank() - mp_size = mpu.get_model_parallel_world_size() - mp_group = mpu.get_model_parallel_group() - else: - mp_rank = 0 - mp_size = 1 - mp_group = None - - global cuda_device, transport_stream, PARTITION_ACTIVATIONS, buffer_0, buffer_1, buffer_0_offset, buffer_1_offset - - if cuda_device is None: - see_memory_usage("First Forward Beginning", force=False) - if dist.get_rank() == 0: - logger.info(f"Activation Checkpointing Information") - logger.info(f"----Partition Activations {PARTITION_ACTIVATIONS}, CPU CHECKPOINTING {CPU_CHECKPOINT}") - logger.info( - f"----contiguous Memory Checkpointing {CONTIGUOUS_CHECKPOINTING} with {num_layers} total layers") - logger.info(f"----Synchronization {SYNCHRONIZE}") - logger.info(f"----Profiling time in checkpointing {PROFILE_TIME}") + global PARTITION_ACTIVATIONS, buffer_0, buffer_1, buffer_0_offset, buffer_1_offset - cuda_device = get_accelerator().current_device_name() - transport_stream = get_accelerator().Stream(device=cuda_device) + cuda_device = get_accelerator().current_device_name() + transport_stream = get_accelerator().Stream(device=cuda_device) if PARTITION_ACTIVATIONS: inputs = partition_activations(args, CPU_CHECKPOINT, CONTIGUOUS_CHECKPOINTING) @@ -607,6 +583,9 @@ def backward(ctx, *grads): # removing pointers to the contiguous buffer memory # so that they can be garbage collected once the checkpoints # have been used + if grads[0].device.type == 'hpu': + import habana_frameworks.torch as htorch + htorch.core.mark_step() if SYNCHRONIZE: get_accelerator().synchronize() if PROFILE_TIME: @@ -631,8 +610,9 @@ def backward(ctx, *grads): raise RuntimeError("Checkpointing is not compatible with .grad(), " "please use .backward() if possible") - global cuda_device, transport_stream, PARTITION_ACTIVATIONS - + global PARTITION_ACTIVATIONS + cuda_device = get_accelerator().current_device_name() + transport_stream = get_accelerator().Stream(device=cuda_device) # Rebuild deepspeed_saved_tensors for t in ctx.deepspeed_saved_tensors: if t is not None and hasattr(t, 'saved_data') and t.saved_data is not None: @@ -764,35 +744,10 @@ def save_args_for_backward(*all_args): global mp_rank, mp_size, mp_group global contiguous_data_buffers, contiguous_size_buffers global data_offsets, size_offsets - if mp_rank is None: - if mpu is not None: - if hasattr(mpu, 'get_tensor_model_parallel_rank'): - mp_rank = mpu.get_tensor_model_parallel_rank() - mp_size = mpu.get_tensor_model_parallel_world_size() - mp_group = mpu.get_tensor_model_parallel_group() - else: - mp_rank = mpu.get_model_parallel_rank() - mp_size = mpu.get_model_parallel_world_size() - mp_group = mpu.get_model_parallel_group() - else: - mp_rank = 0 - mp_size = 1 - mp_group = None - - global cuda_device, transport_stream, PARTITION_ACTIVATIONS, buffer_0, buffer_1, buffer_0_offset, buffer_1_offset - - if cuda_device is None: - see_memory_usage("First Forward Beginning", force=False) - if 
dist.get_rank() == 0: - logger.info(f"Activation Checkpointing Information") - logger.info(f"----Partition Activations {PARTITION_ACTIVATIONS}, CPU CHECKPOINTING {CPU_CHECKPOINT}") - logger.info( - f"----contiguous Memory Checkpointing {CONTIGUOUS_CHECKPOINTING} with {num_layers} total layers") - logger.info(f"----Synchronization {SYNCHRONIZE}") - logger.info(f"----Profiling time in checkpointing {PROFILE_TIME}") + global PARTITION_ACTIVATIONS, buffer_0, buffer_1, buffer_0_offset, buffer_1_offset - cuda_device = get_accelerator().current_device_name() - transport_stream = get_accelerator().Stream(device=cuda_device) + cuda_device = get_accelerator().current_device_name() + transport_stream = get_accelerator().Stream(device=cuda_device) if PARTITION_ACTIVATIONS: inputs = partition_activations(args, CPU_CHECKPOINT, CONTIGUOUS_CHECKPOINTING) @@ -899,7 +854,9 @@ def replay_unpack(none_value): raise RuntimeError("Checkpointing is not compatible with .grad(), " "please use .backward() if possible") - global cuda_device, transport_stream, PARTITION_ACTIVATIONS + global PARTITION_ACTIVATIONS + cuda_device = get_accelerator().current_device_name() + transport_stream = get_accelerator().Stream(device=cuda_device) # gather inputs which is partitioned or checkpointed before first forward if PARTITION_ACTIVATIONS: @@ -988,17 +945,36 @@ def after_backward_hook(_nonuse_grads): return tuple(all_outputs) -@compiler.disable # WA from Pytorch repo for compile + zero 3 accuracy issue -def checkpoint(function, *args): +#TODO[SW-187093]: remove once WA in original pytorch code is resolved +disable_compile_bypass = os.environ.get('HL_DS_DISABLE_CHECKPOINTING_COMPILE_BYPASS', None) + + +def conditional_compile_disable(decorator_func): + + def conditional_decorator(function): + global disable_compile_bypass + if disable_compile_bypass: + return function # don't use compiler.disable decorator + return decorator_func(function) # use compiler.disable decorator + + return conditional_decorator + + +#TODO[SW-187093]: remove once WA in original pytorch code is resolved +@conditional_compile_disable(compiler.disable) # WA from Pytorch repo for compile + zero 3 accuracy issue +def checkpoint(function, *args, use_torch_checkpoint_fn: bool = False): """Checkpoint a model or part of the model. This has been directly copied from torch.utils.checkpoint. 
""" - all_outputs = [] - CheckpointFunction.apply(function, all_outputs, *args) - if len(all_outputs) == 1: - return all_outputs[0] + if use_torch_checkpoint_fn: + return torch.utils.checkpoint.checkpoint(function, *args) else: - return tuple(all_outputs) + all_outputs = [] + CheckpointFunction.apply(function, all_outputs, *args) + if len(all_outputs) == 1: + return all_outputs[0] + else: + return tuple(all_outputs) def partition_activations_in_checkpoint(partition_activation): @@ -1151,6 +1127,27 @@ def configure( if CONTIGUOUS_CHECKPOINTING: assert num_layers is not None, "Must specify the number of layers with contiguous memory checkpointing" + global mp_rank, mp_size, mp_group + + if mpu is not None: + if hasattr(mpu, 'get_tensor_model_parallel_rank'): + mp_rank = mpu.get_tensor_model_parallel_rank() + mp_size = mpu.get_tensor_model_parallel_world_size() + mp_group = mpu.get_tensor_model_parallel_group() + else: + mp_rank = mpu.get_model_parallel_rank() + mp_size = mpu.get_model_parallel_world_size() + mp_group = mpu.get_model_parallel_group() + + #print configuration once + see_memory_usage("After configuration", force=False) + if dist.get_rank() == 0: + logger.info(f"Activation Checkpointing Information") + logger.info(f"----Partition Activations {PARTITION_ACTIVATIONS}, CPU CHECKPOINTING {CPU_CHECKPOINT}") + logger.info(f"----contiguous Memory Checkpointing {CONTIGUOUS_CHECKPOINTING} with {num_layers} total layers") + logger.info(f"----Synchronization {SYNCHRONIZE}") + logger.info(f"----Profiling time in checkpointing {PROFILE_TIME}") + def is_configured(): """True if deepspeed activation checkpointing has been configured diff --git a/deepspeed/runtime/bf16_optimizer.py b/deepspeed/runtime/bf16_optimizer.py index 1f3365b20f4e..57becb22d771 100644 --- a/deepspeed/runtime/bf16_optimizer.py +++ b/deepspeed/runtime/bf16_optimizer.py @@ -302,8 +302,18 @@ def step(self, closure=None): mpu=self.mpu, use_graph=self.graph_harvesting) + for param_partition, grad_partition in zip(self.fp32_groups_flat_partition, + self.fp32_groups_gradient_flat_partition): + # In case of grad acc dtype different than FP32, need to cast to high precision. 
+ param_partition.grad = grad_partition.to( + param_partition.dtype) if grad_partition.dtype != param_partition.dtype else grad_partition + self.optimizer.step() + if self.grad_acc_dtype is not torch.float32: + for param_partition in self.fp32_groups_flat_partition: + param_partition.grad = None + # We need to link optimizer state after the first step() call self._lazy_init_hp_params_optimizer_state() @@ -526,7 +536,7 @@ def param_groups(self): def accumulate_hp_grads_and_remove_lp(self, lp_param, group_idx, param_idx): assert self.immediate_grad_update - self._update_hp_grad(lp_param, group_idx, param_idx, clear_lp_grads=True) + self._update_hp_grad(lp_param, group_idx, param_idx, clear_lp_grads=False) def create_grad_acc_hooks(self): self.grad_accs = [] diff --git a/deepspeed/runtime/compiler.py b/deepspeed/runtime/compiler.py index 879c0a1a2cc9..940ff79b9a56 100644 --- a/deepspeed/runtime/compiler.py +++ b/deepspeed/runtime/compiler.py @@ -4,6 +4,8 @@ # DeepSpeed Team import torch +import contextlib +from deepspeed.accelerator import get_accelerator def is_compile_supported(): @@ -14,3 +16,16 @@ def disable(func): if is_compile_supported(): return torch.compiler.disable(func) return func + + +@contextlib.contextmanager +def compiled_autograd(enabled, kwargs): + try: + if enabled: + with torch._dynamo.compiled_autograd.enable( + torch.compile(backend=get_accelerator().get_compile_backend(), **kwargs)): + yield + else: + yield + finally: + pass diff --git a/deepspeed/runtime/engine.py b/deepspeed/runtime/engine.py index 4c418fbc532e..af060b71fe75 100644 --- a/deepspeed/runtime/engine.py +++ b/deepspeed/runtime/engine.py @@ -35,6 +35,8 @@ from deepspeed.runtime.fp16.unfused_optimizer import FP16_UnfusedOptimizer from deepspeed.runtime.bf16_optimizer import BF16_Optimizer +from deepspeed.linear.optimized_linear import LoRAOptimizedLinear + from deepspeed.runtime.config import DEEPSPEED_OPTIMIZERS, \ ADAGRAD_OPTIMIZER, ADAM_OPTIMIZER, ADAMW_OPTIMIZER, LAMB_OPTIMIZER, ONEBIT_ADAM_OPTIMIZER, ONEBIT_LAMB_OPTIMIZER, \ TORCH_ADAM_PARAM, ADAM_W_MODE, ADAM_W_MODE_DEFAULT, ZERO_ONE_ADAM_OPTIMIZER, MUADAM_OPTIMIZER, MUADAMW_OPTIMIZER, \ @@ -90,15 +92,17 @@ from .pipe.module import PipelineModule from .utils import get_ma_status -from .compiler import is_compile_supported +from .compiler import is_compile_supported, compiled_autograd from ..ops.adam import FusedAdam from ..moe.sharded_moe import TopKGate, MOELayer +from ..moe.capacity_bins import optimize_bins from ..moe.layer import MoE from ..moe.utils import is_moe_param, configure_moe_param_groups from ..git_version_info import version from deepspeed.profiling.flops_profiler.profiler import FlopsProfiler from deepspeed.utils.logging import print_json_dist, print_configuration +from deepspeed.utils.bwc import bwc_tensor_model_parallel_rank from deepspeed.accelerator import get_accelerator @@ -222,6 +226,7 @@ def __init__(self, self.num_experts = [] self.gate_modules = [] self.moe_layers = [] + self.has_sequence_parallel_params = False self._step_applied = False self._global_grad_norm = None self.use_ds_comm = False # False --> Use torch.dist, True --> Use ds.comm backend. 
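# Illustrative sketch, not part of the patch: how the compiled_autograd context
# manager added above in deepspeed/runtime/compiler.py is meant to wrap a
# backward pass. It assumes a torch build that provides
# torch._dynamo.compiled_autograd and an accelerator whose get_compile_backend()
# names a valid torch.compile backend; `model`, `loss_fn` and `batch` are
# placeholder names, not identifiers from this patch.
from deepspeed.runtime.compiler import compiled_autograd

def backward_with_compiled_autograd(model, loss_fn, batch, enabled=True, compile_kwargs=None):
    # enabled=True captures the backward graph and compiles it with the
    # accelerator backend; enabled=False falls back to a plain eager backward.
    loss = loss_fn(model(batch))
    with compiled_autograd(enabled, compile_kwargs or {}):
        loss.backward()
    return loss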
@@ -312,6 +317,14 @@ def __init__(self, elif self.bfloat16_enabled(): self.optimizer = self._configure_bf16_optimizer(optimizer=None) + #Sequence parallel related initialization + for param in self.module.parameters(): + if getattr(param, 'sequence_parallel', False): + self.has_sequence_parallel_params = True + break + if self.has_sequence_parallel_params: + assert self.mpu is not None, "sequence parallel allreduce only supported with tensor parallel enabled" + # Hook optimizer for snip_momentum pruning if hasattr(model, 'pruners'): from ..compression.helper import rewrite_optimizer_step @@ -326,6 +339,8 @@ def __init__(self, self.sparse_tensor_module_names.add(name + ".weight") logger.info("Will convert {} to sparse tensor during training".format(name)) + self._optimized_linear_offload_setup() + self.save_non_zero_checkpoint = False self.save_zero_checkpoint = False if not isinstance(self.optimizer, DeepSpeedZeRoOffload): @@ -362,6 +377,46 @@ def __init__(self, self.unflatten = _unflatten_dense_tensors self._is_compiled = False + self._is_optimizer_compiled = False + self._is_compiled_autograd_enabled = False + self._compile_kwargs = {} + + def _optimized_linear_offload_setup(self): + self.optimized_linear_base_weight_sharding = False + self.optimized_linear_lora_enabled = False + offload_ratio = None + for _, module in self.module.named_modules(): + if isinstance(module, LoRAOptimizedLinear): + self.optimized_linear_lora_enabled = True + offload_ratio = None + if offload_ratio is not None: + assert offload_ratio == module.lora_config.offload_ratio, \ + "all lora_config offload ratios should be the same across the model" + offload_ratio = module.lora_config.offload_ratio + if module.zero_shards > 1: + # set attr so checkpoint saving can handle BWS properly + self.optimized_linear_base_weight_sharding = True + + if offload_ratio is None: + # Nothing enabled, do nothing + return + + total_params = 0 + for _, p in self.module.named_parameters(): + if hasattr(p, 'ds_optim_param'): + total_params += p.numel() + + offload_limit = total_params * offload_ratio + logger.info(f'offloading {offload_ratio*100}% of eligible params, specifically {offload_limit} params') + total_offloaded = 0 + for _, p in self.module.named_parameters(): + if hasattr(p, 'ds_optim_param'): + if total_offloaded < offload_limit: + total_offloaded += p.numel() + p.ds_offload = True + p.offload() + else: + p.ds_offload = False def destroy(self): if self.optimizer is not None and hasattr(self.optimizer, 'destroy'): @@ -453,7 +508,10 @@ def get_global_grad_norm(self) -> float: Returns: float: norm """ - return self._global_grad_norm + grad_norm = self._global_grad_norm + if isinstance(grad_norm, torch.Tensor): + grad_norm = grad_norm.item() + return grad_norm def __getattr__(self, name): """ @@ -968,13 +1026,13 @@ def _set_distributed_vars(self, args): device_rank = args.device_rank if args is not None and hasattr(args, 'device_rank') else self.local_rank if device_rank >= 0: get_accelerator().set_device(device_rank) - self.device = torch.device(get_accelerator().device_name(), device_rank) + self.device = torch.device(get_accelerator().device_name(device_rank)) self.world_size = dist.get_world_size() self.global_rank = dist.get_rank() else: self.world_size = 1 self.global_rank = 0 - self.device = torch.device(get_accelerator().device_name()) + self.device = get_accelerator().device() # Configure based on command line arguments def _configure_with_arguments(self, args, mpu): @@ -1054,9 +1112,12 @@ def _broadcast_model(self): 
def is_replicated(p): if hasattr(p, "ds_status") and p.ds_status is not ZeroParamStatus.AVAILABLE: return False + elif hasattr(p, 'ds_optim_param'): + # do not broadcast OptimizedLinear parameters, they are unique per base weight shard + return False return True - for p in self.module.parameters(): + for n, p in self.module.named_parameters(): # Broadcast the model for different parameters if is_moe_param(p): if torch.is_tensor(p) and is_replicated(p): @@ -1962,35 +2023,36 @@ def backward(self, loss, allreduce_gradients=True, release_loss=False, retain_gr self._start_timers(self.engine_timers.backward_inner_timers) - if self.zero_optimization(): - self.optimizer.is_gradient_accumulation_boundary = self.is_gradient_accumulation_boundary() - self.optimizer.backward(loss, retain_graph=retain_graph) - elif self.amp_enabled(): - # AMP requires delaying unscale when inside gradient accumulation boundaries - # https://nvidia.github.io/apex/advanced.html#gradient-accumulation-across-iterations - delay_unscale = not self.is_gradient_accumulation_boundary() - with amp.scale_loss(loss, self.optimizer, delay_unscale=delay_unscale) as scaled_loss: - scaled_loss.backward(retain_graph=retain_graph) - elif self.fp16_enabled(): - if self.eigenvalue_enabled(): - self.optimizer.backward(loss, create_graph=True, retain_graph=True) - else: + with compiled_autograd(self._is_compiled_autograd_enabled, self._compile_kwargs): + if self.zero_optimization(): + self.optimizer.is_gradient_accumulation_boundary = self.is_gradient_accumulation_boundary() self.optimizer.backward(loss, retain_graph=retain_graph) - elif self.bfloat16_enabled(): - self.optimizer.backward(loss) - else: - if self.eigenvalue_enabled(): - loss.backward(create_graph=True, retain_graph=True) + elif self.amp_enabled(): + # AMP requires delaying unscale when inside gradient accumulation boundaries + # https://nvidia.github.io/apex/advanced.html#gradient-accumulation-across-iterations + delay_unscale = not self.is_gradient_accumulation_boundary() + with amp.scale_loss(loss, self.optimizer, delay_unscale=delay_unscale) as scaled_loss: + scaled_loss.backward(retain_graph=retain_graph) + elif self.fp16_enabled(): + if self.eigenvalue_enabled(): + self.optimizer.backward(loss, create_graph=True, retain_graph=True) + else: + self.optimizer.backward(loss, retain_graph=retain_graph) + elif self.bfloat16_enabled(): + self.optimizer.backward(loss) else: - loss.backward(retain_graph=retain_graph) + if self.eigenvalue_enabled(): + loss.backward(create_graph=True, retain_graph=True) + else: + loss.backward(retain_graph=retain_graph) - self._stop_timers(self.engine_timers.backward_inner_timers) + self._stop_timers(self.engine_timers.backward_inner_timers) - self._start_timers(self.engine_timers.backward_reduce_timers) + self._start_timers(self.engine_timers.backward_reduce_timers) - if allreduce_gradients and self.enable_backward_allreduce: - # Traditional code path that allreduces the module parameter grads - self.allreduce_gradients() + if allreduce_gradients and self.enable_backward_allreduce: + # Traditional code path that allreduces the module parameter grads + self.allreduce_gradients() self._stop_timers(self.engine_timers.backward_reduce_timers) @@ -2462,6 +2524,14 @@ def buffered_allreduce_fallback(self, grads=None, elements_per_buffer=500000000) if self.has_moe_layers: self._reduce_expert_gradients(expert_grads, elements_per_buffer) + if self.has_sequence_parallel_params: + for i, group in enumerate(self.optimizer.bf16_groups): + for j, lp in 
enumerate(group): + if getattr(lp, 'sequence_parallel', False): + dist.all_reduce(self.optimizer.fp32_groups_gradients[i][j], + op=dist.ReduceOp.SUM, + group=self.mpu.get_slice_parallel_group()) + def sparse_allreduce_no_retain(self, bucket, dp_group, dp_world_size=None): allreduced_sparses = self.sparse_allreduce_bucket(bucket, dp_group, dp_world_size) # Densify sparse tensor and copy back to original location @@ -2493,9 +2563,10 @@ def sparse_allreduce(self, sparse, dp_group, dp_world_size=None): dp_world_size = dist.get_world_size(group=dp_group) if self.postscale_gradients(): if self.gradient_average: - values.mul_(self.gradient_predivide_factor() / (dp_world_size)) + + values.mul_(self.gradient_predivide_factor() / (dp_world_size / float(self.sequence_parallel_size))) else: - values.mul_(1. / (dp_world_size)) + values.mul_(1. / (dp_world_size / float(self.sequence_parallel_size))) indices_device_list = self.sparse_all_gather(indices, dp_group) values_device_list = self.sparse_all_gather(values, dp_group) @@ -3604,7 +3675,11 @@ def empty_partition_cache(self): gc.collect() get_accelerator().empty_cache() - def compile(self, backend=get_accelerator().get_compile_backend(), compile_kwargs={}) -> None: + def compile(self, + backend=get_accelerator().get_compile_backend(), + compile_kwargs={}, + compile_optimizer_step=False, + compiled_autograd_enabled=False) -> None: """Compile the module using the specified backend and kwargs. If a compiler_fn is set, it will be used instead of torch.compile(). """ @@ -3616,7 +3691,94 @@ def compile(self, backend=get_accelerator().get_compile_backend(), compile_kwarg self.module.compile(backend=backend, **compile_kwargs) self._is_compiled = True + if compile_optimizer_step: + if not self._is_optimizer_compiled: + self.optimizer.step = torch.compile(self.optimizer.step, backend=backend, **compile_kwargs) + self._is_optimizer_compiled = True + self._is_compiled_autograd_enabled = compiled_autograd_enabled + self._compile_kwargs = compile_kwargs @property def is_compiled(self) -> bool: return self._is_compiled + + def optimize_moe(self, step, max_grouped_experts=1): + """ Optimize MoE gate capacity bins + + If MoE is using capacity bins, optimize the bins based on running stats. + In order to reduce the number of compilation recipes, we optimize a set + of grouped gates together. + The grouped gates must have same number of experts. 
+ """ + if not self.has_moe_layers: + return + + # find all gates with capacity factor + gate_with_capacity_bins_idx = [i for i, gate in enumerate(self.gate_modules) if gate.has_capacity_bins()] + if len(gate_with_capacity_bins_idx) == 0: + return + + # handle only gates have capacity bins usage statistics + gate_capacity_bin_stats = OrderedDict() + for i in gate_with_capacity_bins_idx: + gate = self.gate_modules[i] + if hasattr(gate, 'get_stats'): + stats = gate.get_stats(incremental=False) + if stats is not None and 'capacity_bins' in stats: + gate_capacity_bin_stats[i] = stats['capacity_bins'] + if len(gate_capacity_bin_stats) == 0: + return + + del gate_with_capacity_bins_idx # removing the list because it is out of date + + # divide gates into groups up to max_grouped_experts or until different num_experts encountered + gate_groups = [] + first_gate_idx = list(gate_capacity_bin_stats.keys())[0] + current_group = [first_gate_idx] + current_group_n_experts = self.num_experts[first_gate_idx] + for i in list(gate_capacity_bin_stats.keys())[1:]: + if self.num_experts[i] == current_group_n_experts and len(current_group) < max_grouped_experts: + current_group.append(i) + else: + gate_groups.append(current_group) + current_group = [i] + current_group_n_experts = self.num_experts[i] + gate_groups.append(current_group) + + # print new optimized groups for each pipeline stage (no sharing across pp stages) + dp_rank = dist.get_rank(group=self.mpu.get_data_parallel_group()) + tp_rank = bwc_tensor_model_parallel_rank(self.mpu) + log_ranks = [self.global_rank] if dp_rank == 0 and tp_rank == 0 else [] + + # for each group, (1) accumulate stats (2) calculate optimized capacity and (3) reconfigure bins + for gate_group in gate_groups: + group_stats = [] + for i in gate_group: + group_stats.append(gate_capacity_bin_stats[i]) + + # sanity - verify all gates in groups have same bins edges + bins_edges = [stats['edges'] for stats in group_stats] + same_edges = all(torch.equal(bins_edges[0], tensor) for tensor in bins_edges[1:]) + assert same_edges, f'Got different capacity bin edges for group={gate_group} edges={bins_edges}' + + # accumulate usage + stacked_usage = torch.stack([stats['usage'] for stats in group_stats], dim=0) + total_group_usage = torch.sum(stacked_usage, dim=0) + + # find optimized bins for this group + min_range = group_stats[0]['min_range'] + current_bins = group_stats[0]['edges'] + alignment = group_stats[0]['alignment'] + min_bin_size = group_stats[0]['min_bin_size'] + new_bins = optimize_bins(min_range=min_range, + bins=current_bins, + bins_usage=total_group_usage, + alignment=alignment, + min_bin_size=min_bin_size) + + # configure gates in group with new bins + for i in gate_group: + gate = self.gate_modules[i] + capacity_bins = gate.get_capacity_bins() + capacity_bins.set_bins(new_bins) + log_dist(f'step={step}, optimize capacity bins for group={gate_group} bins={new_bins}', ranks=log_ranks) diff --git a/deepspeed/runtime/hpu_utils.py b/deepspeed/runtime/hpu_utils.py new file mode 100644 index 000000000000..5a7ca4d4b97f --- /dev/null +++ b/deepspeed/runtime/hpu_utils.py @@ -0,0 +1,11 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from deepspeed.accelerator import get_accelerator + + +def get_use_hpu(): + return get_accelerator().device_name() == "hpu" diff --git a/deepspeed/runtime/hybrid_engine.py b/deepspeed/runtime/hybrid_engine.py index a991c4304563..8a6311bb6e83 100644 --- a/deepspeed/runtime/hybrid_engine.py +++ b/deepspeed/runtime/hybrid_engine.py @@ -17,16 +17,14 @@ from deepspeed.accelerator import get_accelerator from torch import nn from deepspeed.utils import logger - -from deepspeed.ops.op_builder import InferenceBuilder - from deepspeed.module_inject.layers import LinearLayer, Normalize, EmbeddingLayer, OPTEmbedding +from ..ops.transformer.inference.op_binding.workspace import WorkspaceOp + try: import transformers OPTLearnedPositionalEmbedding = transformers.models.opt.modeling_opt.OPTLearnedPositionalEmbedding except: OPTLearnedPositionalEmbedding = None -inference_cuda_module = None class DeepSpeedHybridEngine(DeepSpeedEngine): @@ -61,12 +59,8 @@ def __init__(self, args, model, **kwargs): self._total_batch_size = None self._gather_latency = 0 - global inference_cuda_module - if inference_cuda_module is None: - builder = InferenceBuilder() - inference_cuda_module = builder.load() - self.is_lora_fused = False + self.workspace = WorkspaceOp() def convert_to_linear_transposed(self, model): @@ -160,13 +154,13 @@ def unfuse_lora_weight_non_pinned(self): def retake_inference_cache(self): if self._config.hybrid_engine.release_inference_cache: - retake_success = inference_cuda_module.retake_workspace() + retake_success = self.workspace.retake_workspace() if not retake_success: logger.warning("Unable to acquire workspace on first attempt, emptying cache and retrying.") gc.collect() get_accelerator().empty_cache() - retake_success = inference_cuda_module.retake_workspace() + retake_success = self.workspace.retake_workspace() if not retake_success: raise RuntimeError("Unable to retake inference workspace.") @@ -269,7 +263,7 @@ def generate(self, *inputs, **kwargs): self.is_lora_fused = False if self._config.hybrid_engine.release_inference_cache: - inference_cuda_module.release_workspace() + self.workspace.release_workspace() gc.collect() get_accelerator().empty_cache() diff --git a/deepspeed/runtime/lr_schedules.py b/deepspeed/runtime/lr_schedules.py index d7f7e15a4dbd..e0554b9709e0 100755 --- a/deepspeed/runtime/lr_schedules.py +++ b/deepspeed/runtime/lr_schedules.py @@ -13,6 +13,7 @@ from torch.optim import Optimizer import math from deepspeed.utils import logger +from torch import tensor, is_tensor LR_SCHEDULE = 'lr_schedule' LR_RANGE_TEST = 'LRRangeTest' @@ -694,6 +695,9 @@ def step(self, last_batch_iteration=None): last_batch_iteration = self.last_batch_iteration + 1 self.last_batch_iteration = last_batch_iteration for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()): + # new LR should match the type of current LR for scalar and Tensor LR support + if is_tensor(param_group['lr']): + lr = tensor(lr, device=param_group['lr'].device) param_group['lr'] = lr self._last_lr = [group['lr'] for group in self.optimizer.param_groups] diff --git a/deepspeed/runtime/pipe/engine.py b/deepspeed/runtime/pipe/engine.py index 9e84121d50fa..6944d85a41c1 100644 --- a/deepspeed/runtime/pipe/engine.py +++ b/deepspeed/runtime/pipe/engine.py @@ -151,11 +151,33 @@ def __init__(self, has_bool_tensors=False, *super_args, **super_kwargs): if self.global_rank != min(d['ranks']): tied_params += sum(p.numel() for p in d['module'].parameters()) unique_params 
-= tied_params - params_tensor = torch.LongTensor(data=[num_params, unique_params]).to(self.device) + + # Use Int32 representation instead of Int64 for calculations. + # num_param division & modulo after all reduce should be lower than MAX Int32. + # Using this value will be safe if used with less than ~2000 devices. + # Int32Max > all_reduce_group*chunk_size + chunk_size = 10**6 + + num_params_quotient = num_params // chunk_size + num_params_remainder = num_params % chunk_size + + unique_params_quotient = unique_params // chunk_size + unique_params_remainder = unique_params % chunk_size + + assert (unique_params_quotient * chunk_size + + unique_params_remainder) == unique_params, "Value mismatch after Int64 splitting" + assert (num_params_quotient * chunk_size + + num_params_remainder) == num_params, "Value mismatch after Int64 splitting" + + params_tensor = torch.IntTensor( + data=[num_params_quotient, num_params_remainder, unique_params_quotient, unique_params_remainder]).to( + self.device) + dist.all_reduce(params_tensor, group=self.grid.get_model_parallel_group()) params_tensor = params_tensor.tolist() - total_params = params_tensor[0] - unique_params = params_tensor[1] + total_params = params_tensor[0] * chunk_size + params_tensor[1] + unique_params = params_tensor[2] * chunk_size + params_tensor[3] + if self.grid.data_parallel_id == 0: logger.info(f'RANK={self.global_rank} ' f'STAGE={self.stage_id} ' @@ -213,6 +235,8 @@ def __init__(self, has_bool_tensors=False, *super_args, **super_kwargs): self.module.activation_checkpoint_func = ds_checkpointing.non_reentrant_checkpoint if self.grid.get_global_rank() == 0: logger.info(f'CONFIG: activation_checkpoint_func=non_reentrant_checkpoint') + if self.module.activation_checkpoint_interval > 0: + self.module._precompute_checkpointable_values() self.module.checkpoint_parallel_write_pipeline = self._config.checkpoint_parallel_write_pipeline @@ -471,9 +495,7 @@ def eval_batch(self, micro_batches = self.micro_batches if num_micro_batches is None else num_micro_batches # Do the work - sched = schedule.InferenceSchedule(micro_batches=self.micro_batches, - stages=self.num_stages, - stage_id=self.stage_id) + sched = schedule.InferenceSchedule(micro_batches=micro_batches, stages=self.num_stages, stage_id=self.stage_id) # prevent dead-lock with multiple evals sequence dist.barrier() diff --git a/deepspeed/runtime/pipe/module.py b/deepspeed/runtime/pipe/module.py index 8036faef72ee..e64f801b1325 100644 --- a/deepspeed/runtime/pipe/module.py +++ b/deepspeed/runtime/pipe/module.py @@ -196,6 +196,16 @@ def __init__(self, #newseed = get_accelerator().initial_seed() + self._grid.get_stage_id() #ds_utils.set_random_seed(newseed) + self.activation_checkpoint_interval = activation_checkpoint_interval + + self.activation_checkpoint_func = activation_checkpoint_func + + #storage for precomputed checkpointable results + self.is_checkpointable_results = [] + self.is_checkpointable_results_interval = None + + # if configuration use_reentrant = False, self.activation_checkpoint_func will be set to ``checkpointing.non_reentrant_checkpoint`` + #with torch.random.fork_rng(devices=[get_accelerator().current_device_name()]): self._build() self.to(get_accelerator().device_name(self.local_rank)) @@ -203,10 +213,15 @@ def __init__(self, self.tied_comms = self._index_tied_modules() self._synchronize_tied_weights() - self.activation_checkpoint_interval = activation_checkpoint_interval - - self.activation_checkpoint_func = activation_checkpoint_func - # if configuration
use_reentrant = False, self.activation_checkpoint_func will be set to ``checkpointing.non_reentrant_checkpoint`` + def _precompute_checkpointable_values(self): + if self.activation_checkpoint_interval > 0 and self.is_checkpointable_results_interval != self.activation_checkpoint_interval: + num_layers = len(self.forward_funcs) + self.interval_was_zero = False + for start_idx in range(0, num_layers, self.activation_checkpoint_interval): + end_idx = min(start_idx + self.activation_checkpoint_interval, num_layers) + funcs = self.forward_funcs[start_idx:end_idx] + self.is_checkpointable_results.append(self._is_checkpointable(funcs)) + self.is_checkpointable_results_interval = self.activation_checkpoint_interval def _build(self): specs = self._layer_specs @@ -352,7 +367,9 @@ def exec_func(*inputs): else: num_layers = len(self.forward_funcs) x = forward_input - for start_idx in range(0, num_layers, self.activation_checkpoint_interval): + for start_idx, is_checkpointable_result in \ + zip(range(0, num_layers, self.activation_checkpoint_interval), self.is_checkpointable_results): + end_idx = min(start_idx + self.activation_checkpoint_interval, num_layers) funcs = self.forward_funcs[start_idx:end_idx] @@ -361,7 +378,7 @@ def exec_func(*inputs): if not isinstance(x, tuple): x = (x, ) - if self._is_checkpointable(funcs): + if is_checkpointable_result: x = self.activation_checkpoint_func(exec_range_func(start_idx, end_idx), *x) else: x = exec_range_func(start_idx, end_idx)(*x) @@ -641,3 +658,12 @@ def get_additional_losses(self): Return a dictionary of {"loss name": loss_value} or None if no additional losses. """ return None + + #TODO(SW-198696) remove workaround for micro offset torch compile issue + def compile(self, *args, **kwargs): + for idx, layer in enumerate(self.forward_funcs): + if isinstance(layer, nn.Module): + layer.compile(*args, **kwargs) + else: + new_layer = torch.compile(layer, *args, **kwargs) + self.forward_funcs[idx] = new_layer diff --git a/deepspeed/runtime/state_dict_factory.py b/deepspeed/runtime/state_dict_factory.py index c2db85d1ba58..469e2fe9f55f 100755 --- a/deepspeed/runtime/state_dict_factory.py +++ b/deepspeed/runtime/state_dict_factory.py @@ -335,10 +335,26 @@ def merge_state_dict(self, new_client_sd[key] = torch.cat(value_list, axis=0) else: new_client_sd[key] = self.merge_query_key_value(value_list, ckpt_ver) - elif "mlp.dense_h_to_4h.weight" in key or "word_embeddings.weight" in key or "mlp.dense_h_to_4h.bias" in key: - if quantize and "mlp.dense_h_to_4h.weight" in key: - value_list = quantizer.Quantize(value_list, quantize_bits, groups, key=key) + elif "word_embeddings.weight" in key or "mlp.dense_h_to_4h.bias" in key or "lm_head.weight" in key: new_client_sd[key] = torch.cat(value_list, axis=0) + elif "mlp.dense_h_to_4h.weight" in key: + if quantize: + value_list = quantizer.Quantize(value_list, quantize_bits, groups, key=key) + # HACK: + # Following code checks if h_to_4h is swiglu. This is required in order to merge correctly. + # The correct way is to add metadata to state_dict that provides info on how to merge/split each tensor. 
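# Illustrative sketch, not part of the patch: the merge performed by the swiglu
# branch below, written as a standalone helper. Each tensor-parallel shard of
# mlp.dense_h_to_4h.weight stacks its two swiglu sub-projection slices (call
# them W_i and V_i) along dim 0, so a plain torch.cat over shards would
# interleave W and V blocks; chunking each shard in two and concatenating the
# halves separately restores the full [W; V] layout. `value_list` stands for the
# per-shard tensors, as in the code below.
import torch

def merge_swiglu_shards(value_list):
    halves = [torch.chunk(v, 2, dim=0) for v in value_list]
    merged_first = torch.cat([h[0] for h in halves], dim=0)   # all W_i slices
    merged_second = torch.cat([h[1] for h in halves], dim=0)  # all V_i slices
    return torch.cat([merged_first, merged_second], dim=0)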
+ size_h_to_4h = sd_list[0]["mlp.dense_h_to_4h.weight"].numel() + size_4h_to_h = sd_list[0]["mlp.dense_4h_to_h.weight"].numel() + if size_h_to_4h == size_4h_to_h: + new_client_sd[key] = torch.cat(value_list, axis=0) + elif size_h_to_4h == 2 * size_4h_to_h: + chunked_slices = [torch.chunk(v, 2, dim=0) for v in value_list] + merged_chunks_0 = torch.cat([s[0] for s in chunked_slices], dim=0) + merged_chunks_1 = torch.cat([s[1] for s in chunked_slices], dim=0) + new_client_sd[key] = torch.cat([merged_chunks_0, merged_chunks_1], dim=0) + else: + assert False, f"Unsupported slices size of mlp.dense_h_to_4h.weight={size_h_to_4h} " \ + f"mlp.dense_4h_to_h.weight={size_4h_to_h}" else: new_client_sd[key] = value_list[0] if quantize: @@ -383,12 +399,27 @@ def split_state_dict(self, q_vals = quantizer.Quantize([value], quantize_bits, groups, key) value = q_vals[0] new_client_sd[key] = self.split_query_key_value(value, num_to_split, ckpt_offset, ckpt_ver) - elif "mlp.dense_h_to_4h.weight" in key or "word_embeddings.weight" in key or "mlp.dense_h_to_4h.bias" in key or "final_linear.weight" in key: + elif "word_embeddings.weight" in key or "mlp.dense_h_to_4h.bias" in key or "final_linear.weight" in key \ + or "lm_head.weight" in key: assert value.shape[0] % num_to_split == 0 split_size = value.shape[0] // num_to_split - if quantize and "mlp.dense_h_to_4h.weight" in key: + new_client_sd[key] = torch.split(value, split_size, dim=0)[ckpt_offset] + elif "mlp.dense_h_to_4h.weight" in key: + assert value.shape[0] % num_to_split == 0 + split_size = value.shape[0] // num_to_split + if quantize: q_vals = quantizer.Quantize([value], quantize_bits, groups, key) value = q_vals[0] + # HACK: + # Following code checks if h_to_4h is swiglu. + # The correct way to check is to add metadata to state_dict that provides info on + # how to merge/split each tensor. + # Currently, swiglu split is NOT supported as it requires handling of all chunks. + size_h_to_4h = value.numel() + size_4h_to_h = client_sd["mlp.dense_4h_to_h.weight"].numel() + assert size_h_to_4h == size_4h_to_h, \ + f"Split not supported dense_h_to_4h.weight size={size_h_to_4h} " \ + f"and dense_4h_to_h.weight size={size_4h_to_h}" new_client_sd[key] = torch.split(value, split_size, dim=0)[ckpt_offset] else: new_client_sd[key] = value diff --git a/deepspeed/runtime/zero/config.py b/deepspeed/runtime/zero/config.py index 2089d59dbce4..410bfc8c82a4 100644 --- a/deepspeed/runtime/zero/config.py +++ b/deepspeed/runtime/zero/config.py @@ -246,6 +246,12 @@ class DeepSpeedZeroConfig(DeepSpeedConfigModel): new_param="gather_16bit_weights_on_model_save") """ Deprecated, please use ``gather_16bit_weights_on_model_save`` """ + use_all_reduce_for_fetch_params: bool = Field(False, alias="stage3_use_all_reduce_for_fetch_params") + """ + Use all_reduce op when fetching module parameters at stage3. This allows to significantly improve + performance by reducing the overhead of concatenation and slicing on the host. 
+ """ + ignore_unused_parameters: bool = True """ Unused parameters in modules may be unexpected in static networks, but diff --git a/deepspeed/runtime/zero/parameter_offload.py b/deepspeed/runtime/zero/parameter_offload.py index e9e79c2647fb..98540363bb88 100644 --- a/deepspeed/runtime/zero/parameter_offload.py +++ b/deepspeed/runtime/zero/parameter_offload.py @@ -41,7 +41,7 @@ def _apply_forward_and_backward_to_tensors_only(module, forward_function, backwa class ZeROOrderedDict(OrderedDict): - def __init__(self, parent_module, *args, **kwargs): + def __init__(self, parent_module=None, *args, **kwargs): """A replacement for ``collections.OrderedDict`` to detect external ZeRO params. Args: @@ -59,7 +59,7 @@ def __getitem__(self, key): if param is None: return param - if param.ds_status == ZeroParamStatus.NOT_AVAILABLE: + if hasattr(param, "ds_status") and param.ds_status == ZeroParamStatus.NOT_AVAILABLE: if self._parent_module._parameters._in_forward: register_external_parameter(FWD_MODULE_STACK[-1], param) param.all_gather() @@ -327,39 +327,6 @@ def _bwd_hook_unexpected_inputs_msg(value): def _pre_backward_module_hook(module, inputs, output): - if not hasattr(module, "pre_bwd_fn"): - - @instrument_w_nvtx - def _run_before_backward_function(sub_module): - # some models (e.g. Albert) may run multiple forwards on the same layer in a loop - # before doing backwards, so each backward will need a pre-fetch - using reference - # counting to support this scenario - #print(f"COUNTER before: {sub_module.applied_pre_backward_ref_cnt}") - if sub_module.applied_pre_backward_ref_cnt > 0: - self.pre_sub_module_backward_function(sub_module) - sub_module.applied_pre_backward_ref_cnt -= 1 - #print(f"COUNTER after: {sub_module.applied_pre_backward_ref_cnt}") - - class PreBackwardFunctionForModule(torch.autograd.Function): - - @staticmethod - def forward(ctx, outputs): - # Capture `module` and _run_before_backward_function - ctx.module = module - ctx.pre_backward_function = _run_before_backward_function - if not hasattr(ctx.module, "applied_pre_backward_ref_cnt"): - ctx.module.applied_pre_backward_ref_cnt = 0 - ctx.module.applied_pre_backward_ref_cnt += 1 - outputs = outputs.detach() - return outputs - - @staticmethod - def backward(ctx, *args): - ctx.pre_backward_function(ctx.module) - return args - - module.pre_bwd_fn = PreBackwardFunctionForModule - return apply_to_tensors_only(module.pre_bwd_fn.apply, output, warning_msg_fn=_bwd_hook_unexpected_inputs_msg) @@ -387,41 +354,6 @@ def _run_before_forward_function(input): def _post_backward_module_hook(module, inputs): module.ds_grads_remaining = 0 - if not hasattr(module, "post_bwd_fn"): - - @instrument_w_nvtx - def _run_after_backward_function(sub_module): - if sub_module.ds_grads_remaining == 0: - self.post_sub_module_backward_function(sub_module) - - class PostBackwardFunctionModule(torch.autograd.Function): - - @staticmethod - def forward(ctx, output): - ctx.module = module - if output.requires_grad: - #TODO SOME TIMES post backward does not seem to be triggered debug in detail - #Should only cause increase in memory not correctness issue - #if output.grad_fn.__class__.__name__ == 'ViewBackward': - # ctx.view=True - # print(f"Warning view tensor for input to module : {module.__class__.__name__}. Backward hooks may not trigger properly") - #assert len(module.parameters(recurse=False)), "The input tensor to the module is a view, and autograd Function or register_hook is not triggered with view tensors." 
- #if module.ds_grads_remaining == 0: - # print(f"Before Forward: {ctx.module.__class__.__name__}") - module.ds_grads_remaining += 1 - ctx.post_backward_function = _run_after_backward_function - output = output.detach() - return output - - @staticmethod - def backward(ctx, *args): - ctx.module.ds_grads_remaining = ctx.module.ds_grads_remaining - 1 - if ctx.module.ds_grads_remaining == 0: - ctx.post_backward_function(ctx.module) - return args - - module.post_bwd_fn = PostBackwardFunctionModule - return apply_to_tensors_only(module.post_bwd_fn.apply, inputs, warning_msg_fn=_bwd_hook_unexpected_inputs_msg) @@ -433,9 +365,77 @@ def backward(ctx, *args): self.forward_hooks.append(module.register_forward_hook(_post_forward_module_hook)) # Pre backward hook + if not hasattr(module, "pre_bwd_fn"): + + @instrument_w_nvtx + def _run_before_backward_function(sub_module): + # some models (e.g. Albert) may run multiple forwards on the same layer in a loop + # before doing backwards, so each backward will need a pre-fetch - using reference + # counting to support this scenario + #print(f"COUNTER before: {sub_module.applied_pre_backward_ref_cnt}") + if sub_module.applied_pre_backward_ref_cnt > 0: + self.pre_sub_module_backward_function(sub_module) + sub_module.applied_pre_backward_ref_cnt -= 1 + #print(f"COUNTER after: {sub_module.applied_pre_backward_ref_cnt}") + + class PreBackwardFunctionForModule(torch.autograd.Function): + + @staticmethod + def forward(ctx, outputs): + # Capture `module` and _run_before_backward_function + ctx.module = module + ctx.pre_backward_function = _run_before_backward_function + if not hasattr(ctx.module, "applied_pre_backward_ref_cnt"): + ctx.module.applied_pre_backward_ref_cnt = 0 + ctx.module.applied_pre_backward_ref_cnt += 1 + outputs = outputs.detach() + return outputs + + @staticmethod + def backward(ctx, *args): + ctx.pre_backward_function(ctx.module) + return args + + module.pre_bwd_fn = PreBackwardFunctionForModule + self.backward_hooks.append(module.register_forward_hook(_pre_backward_module_hook)) # post backward hook + if not hasattr(module, "post_bwd_fn"): + + @instrument_w_nvtx + def _run_after_backward_function(sub_module): + if sub_module.ds_grads_remaining == 0: + self.post_sub_module_backward_function(sub_module) + + class PostBackwardFunctionModule(torch.autograd.Function): + + @staticmethod + def forward(ctx, output): + ctx.module = module + if output.requires_grad: + #TODO SOME TIMES post backward does not seem to be triggered debug in detail + #Should only cause increase in memory not correctness issue + #if output.grad_fn.__class__.__name__ == 'ViewBackward': + # ctx.view=True + # print(f"Warning view tensor for input to module : {module.__class__.__name__}. Backward hooks may not trigger properly") + #assert len(module.parameters(recurse=False)), "The input tensor to the module is a view, and autograd Function or register_hook is not triggered with view tensors." 
+ #if module.ds_grads_remaining == 0: + # print(f"Before Forward: {ctx.module.__class__.__name__}") + module.ds_grads_remaining += 1 + ctx.post_backward_function = _run_after_backward_function + output = output.detach() + return output + + @staticmethod + def backward(ctx, *args): + ctx.module.ds_grads_remaining = ctx.module.ds_grads_remaining - 1 + if ctx.module.ds_grads_remaining == 0: + ctx.post_backward_function(ctx.module) + return args + + module.post_bwd_fn = PostBackwardFunctionModule + self.backward_hooks.append(module.register_forward_pre_hook(_post_backward_module_hook)) @torch.no_grad() diff --git a/deepspeed/runtime/zero/partition_parameters.py b/deepspeed/runtime/zero/partition_parameters.py index f76bcf0eb781..08185d569d04 100755 --- a/deepspeed/runtime/zero/partition_parameters.py +++ b/deepspeed/runtime/zero/partition_parameters.py @@ -1871,6 +1871,7 @@ def _allgather_params_coalesced(self, param_list, hierarchy=0, quantize=False): return None + @torch.no_grad() def _allgather_params(self, param_list, hierarchy=0): if len(param_list) == 0: return diff --git a/deepspeed/runtime/zero/partitioned_param_coordinator.py b/deepspeed/runtime/zero/partitioned_param_coordinator.py index bdec8a55fcbc..caf478fca72f 100644 --- a/deepspeed/runtime/zero/partitioned_param_coordinator.py +++ b/deepspeed/runtime/zero/partitioned_param_coordinator.py @@ -178,7 +178,6 @@ def trace_prologue(self, sub_module: Module) -> None: force=True) self._invalidate_trace() - @compiler.disable def record_module(self, sub_module: Module) -> None: """adds sub module to trace""" if not self.is_record_trace(): diff --git a/deepspeed/runtime/zero/stage3.py b/deepspeed/runtime/zero/stage3.py index 3f43e865fa72..bcfcfcd00812 100644 --- a/deepspeed/runtime/zero/stage3.py +++ b/deepspeed/runtime/zero/stage3.py @@ -11,11 +11,12 @@ from deepspeed.utils import groups from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors +import deepspeed.runtime.compiler as compiler from deepspeed.runtime.base_optimizer import ZeROOptimizer from deepspeed.utils import logger from deepspeed.runtime.fp16.loss_scaler import CreateLossScaler from deepspeed.runtime.comm.coalesced_collectives import reduce_scatter_coalesced, all_to_all_quant_reduce -from deepspeed.runtime.utils import inf, get_global_norm, is_model_parallel_parameter, get_only_unique_item +from deepspeed.runtime.utils import inf, is_model_parallel_parameter, get_only_unique_item from deepspeed.runtime.zero.partition_parameters import * from deepspeed.runtime.zero.config import ZeroStageEnum from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum @@ -215,14 +216,12 @@ def __init__( self.module = module self.elastic_checkpoint = elastic_checkpoint - self.inf_or_nan_tracker: Tensor = torch.zeros(1, - dtype=torch.bool, - device=get_accelerator().current_device_name(), - requires_grad=False) + self.device = get_accelerator().current_device_name() if not self.offload_optimizer else OffloadDeviceEnum.cpu + + self.inf_or_nan_tracker: Tensor = torch.zeros(1, dtype=torch.bool, device=self.device, requires_grad=False) self.deepspeed_adam_offload = (self.offload_optimizer and type(init_optimizer) == DeepSpeedCPUAdam) - self.device = get_accelerator().current_device_name() if not self.offload_optimizer else OffloadDeviceEnum.cpu ### streams used for overlapping computation with communication self.reduce_and_partition_stream = None if get_accelerator().is_synchronized_device() else get_accelerator( ).Stream() if overlap_comm else 
get_accelerator().default_stream() @@ -1111,7 +1110,6 @@ def overlapping_partition_gradients_reduce_epilogue(self): def create_reduce_and_remove_grad_hooks(self): print_rank_0(f'[Begin] Create gradient reduction hooks') - self.grad_accs = [] self.leaf_parameters = defaultdict(list) for i, param_group in enumerate(self.fp16_groups): for param in param_group: @@ -1124,15 +1122,13 @@ def create_reduce_and_remove_grad_hooks(self): #print(f"After all gather {param.device}, {param.shape}") def wrapper(param): - param_tmp = param.expand_as(param) - grad_acc = param_tmp.grad_fn.next_functions[0][0] @instrument_w_nvtx def reduce_partition_and_remove_grads(*notneeded): self.reduce_ready_partitions_and_remove_grads(param) - self._grad_acc_hooks.append(grad_acc.register_hook(reduce_partition_and_remove_grads)) - self.grad_accs.append(grad_acc) + self._grad_acc_hooks.append( + param.register_post_accumulate_grad_hook(reduce_partition_and_remove_grads)) #print(f"param grad fn {param.expand_as(param).grad_fn}") if z3_leaf_parameter(param): @@ -1412,7 +1408,7 @@ def complete_grad_norm_calculation_for_cpu_offload(self, params): err = torch.tensor(-1.0, device=inf_or_nan.device, dtype=torch.float) total_norm = inf_or_nan * err + inf_or_nan.logical_not() * total_norm - return total_norm + return total_norm.cpu() @instrument_w_nvtx def partition_grads(self, params_to_release: List[Parameter], grad_partitions: List[Tensor]) -> None: @@ -1843,6 +1839,8 @@ def _get_norm_groups(self): norm_groups.append(self.get_grad_norm_direct(self.averaged_gradients[i], self.fp16_groups[i])) return norm_groups + # TODO: remove after SW-207183 is resolved + @compiler.disable @instrument_w_nvtx def _prepare_fp32_grad_for_sub_group(self, sub_group_id): partition_id = dist.get_rank(group=self.dp_process_group) @@ -2027,7 +2025,7 @@ def step(self, closure=None): return norm_groups = self._get_norm_groups() - scaled_global_grad_norm = get_global_norm(norm_list=norm_groups) + scaled_global_grad_norm = torch.norm(torch.stack(norm_groups)) # Stash unscaled gradient norm self._global_grad_norm = scaled_global_grad_norm / self.loss_scale @@ -2111,8 +2109,8 @@ def unscale_and_clip_grads(self, sub_group_id, total_norm): if self.clip_grad > 0.: # norm is in fact norm*scale clip = ((total_norm / self.loss_scale) + 1e-6) / self.clip_grad - if clip > 1: - combined_scale = clip * self.loss_scale + clip = torch.clamp(clip, min=1.0) + combined_scale = clip * self.loss_scale self.fp32_partitioned_groups_flat[sub_group_id].grad.mul_(1. 
/ combined_scale) @@ -2147,7 +2145,8 @@ def has_overflow(self, partition_gradients=True): self.inf_or_nan_tracker += torch.isnan(self.grad_partitions_flat_buffer).any() self.inf_or_nan_tracker = self.inf_or_nan_tracker > 0 - overflow_gpu = self.inf_or_nan_tracker.clone().to(torch.uint8) + overflow_gpu = self.inf_or_nan_tracker.clone().to(get_accelerator().current_device_name()).to( + torch.uint8) self.inf_or_nan_tracker.zero_() if not get_accelerator().resolves_data_dependency(): @@ -2381,9 +2380,10 @@ def _set_param_groups(self, value): # Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale" def _get_loss_scale(self): if self.custom_loss_scaler: - return self.external_loss_scale + # TODO: SW-187114 Remove WA: cast self.loss_scale to float + return float(self.external_loss_scale) else: - return self.loss_scaler.cur_scale + return float(self.loss_scaler.cur_scale) def _set_loss_scale(self, value): self.loss_scaler.cur_scale = value diff --git a/deepspeed/runtime/zero/stage_1_and_2.py b/deepspeed/runtime/zero/stage_1_and_2.py index 7c8ae8e67b78..ebc0081b16c6 100755 --- a/deepspeed/runtime/zero/stage_1_and_2.py +++ b/deepspeed/runtime/zero/stage_1_and_2.py @@ -309,6 +309,7 @@ def __init__(self, for param in param_group['params']: if param.requires_grad: param.grad_accum = None + param.param_idx_in_group = len(trainable_parameters) trainable_parameters.append(param) self.bit16_groups.append(trainable_parameters) @@ -955,7 +956,7 @@ def reduce_independent_p_g_buckets_and_remove_grads(self, param, i): assert grad_reduc is not None, f"rank {dist.get_rank()} - Invalid to reduce Param {param_id} with None gradient" self.grads_in_ipg_bucket.append(grad_reduc) - self.params_in_ipg_bucket.append((i, param, param_id)) + self.params_in_ipg_bucket.append((i, param.param_idx_in_group, param_id)) #make sure the average tensor function knows how to average the gradients if is_moe_param(param): @@ -1059,8 +1060,9 @@ def average_tensor(self, tensor): process_group = self.dp_process_group # count = 0 - for i, param, param_id in self.params_in_ipg_bucket: + for i, param_idx_in_group, param_id in self.params_in_ipg_bucket: + param = self.bit16_groups[i][param_idx_in_group] process_group = self.dp_process_group grad_reduc = self.get_gradient_for_reduction(param) #Averages gradients at parameter level if ipg has a moe param @@ -1382,8 +1384,8 @@ def reduce_ipg_grads(self): stream = get_accelerator().current_stream() with get_accelerator().stream(stream): - for _, param, param_id in self.params_in_ipg_bucket: - + for group_idx, param_idx_in_group, param_id in self.params_in_ipg_bucket: + param = self.bit16_groups[group_idx][param_idx_in_group] assert self.params_already_reduced[param_id] == False, \ f"The parameter {param_id} has already been reduced. \ Gradient computed twice for this partition. 
\ @@ -2087,10 +2089,11 @@ def _set_param_groups(self, value): # Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale" def _get_loss_scale(self): + # TODO: SW-187114 Remove WA: cast self.loss_scale to float if self.custom_loss_scaler: - return self.external_loss_scale + return float(self.external_loss_scale) else: - return self.loss_scaler.cur_scale + return float(self.loss_scaler.cur_scale) def _set_loss_scale(self, value): self.loss_scaler.cur_scale = value diff --git a/deepspeed/sequence/layer.py b/deepspeed/sequence/layer.py index a715843d8eff..3cbdc4309a98 100644 --- a/deepspeed/sequence/layer.py +++ b/deepspeed/sequence/layer.py @@ -4,6 +4,7 @@ # DeepSpeed Team import torch +import os from typing import Any, Tuple from torch import Tensor @@ -11,6 +12,9 @@ import deepspeed.comm as dist +# TODO[SW-207148]: Remove the WA below once there is a proper fix +transpose_dim = int(os.environ.get('HL_DS_DISTRIBUTED_ATTENTION_SEQ_DIM', "2")) + def single_all_to_all(input, scatter_idx, gather_idx, group): seq_world_size = dist.get_world_size(group) @@ -33,7 +37,8 @@ def single_all_to_all(input, scatter_idx, gather_idx, group): # if scattering the seq-dim, transpose the heads back to the original dimension if scatter_idx < 2: - output = output.transpose(0, 2).contiguous() + # TODO[SW-207148]: Remove the WA below once there is a proper fix + output = output.transpose(0, transpose_dim).contiguous() return output.reshape( inp_shape[: gather_idx] + \ diff --git a/deepspeed/tools/__init__.py b/deepspeed/tools/__init__.py new file mode 100644 index 000000000000..36d8fbe03558 --- /dev/null +++ b/deepspeed/tools/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team diff --git a/deepspeed/tools/pg_sim/__init__.py b/deepspeed/tools/pg_sim/__init__.py new file mode 100644 index 000000000000..917c5a255fa8 --- /dev/null +++ b/deepspeed/tools/pg_sim/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .pg import install_sim_dist_backend diff --git a/deepspeed/tools/pg_sim/pg.py b/deepspeed/tools/pg_sim/pg.py new file mode 100644 index 000000000000..b7e2483f86d0 --- /dev/null +++ b/deepspeed/tools/pg_sim/pg.py @@ -0,0 +1,306 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import warnings +import torch +import torch.distributed as dist + +from datetime import timedelta +from functools import partial +from torch.distributed.constants import default_pg_timeout +from torch.distributed.distributed_c10d import (GroupMember, BroadcastOptions, AllreduceOptions, ReduceOp) +from torch.distributed.distributed_c10d import STORE_BASED_BARRIER_PREFIX # noqa +from deepspeed.accelerator import get_accelerator + + +class SimProcessGroup: + BACKEND = "sim" + DEFAULT_PG = None + WORLD_SIZE = -1 + STORE = None + + def __init__(self, rank, world_size, timeout, backend): + self.sim_rank = rank + self.pg_world_size = world_size + self.timeout = timeout + self.backend = backend + self.pg = None + self.torch_ver_major = int(torch.__version__.split('.')[0]) + self.torch_ver_minor = int(torch.__version__.split('.')[1]) + + assert self.torch_ver_major == 1, \ + f"Torch version major != 1 is not supported (version={torch.__version__})" + assert self.torch_ver_minor >= 10, \ + f"Torch version < 1.10 is not supported (version={torch.__version__})" + + if self.torch_ver_minor < 13: + warnings.warn(f"Torch version < 1.13 is not tested (version={torch.__version__})") + + # default is the first process group created + if SimProcessGroup.DEFAULT_PG is None: + SimProcessGroup.DEFAULT_PG = self + + @staticmethod + def get_dist_group_count(): + return torch.distributed.distributed_c10d._group_count + + @classmethod + def store_add_rest_of_world(cls, next_group): + group = cls.get_dist_group_count() + (1 if next_group else 0) + store_key = f"{STORE_BASED_BARRIER_PREFIX}:{group}" + cls.STORE.add(store_key, cls.WORLD_SIZE - 1) + + def _create_pg(self): + self.store_add_rest_of_world(next_group=False) + pg = dist.new_group(ranks=[0], timeout=self.timeout, backend=self.backend, pg_options=None) + return pg + + def post_create_sim_group(self): + self.pg = self._create_pg() + + @classmethod + def default_pg(cls): + assert cls.DEFAULT_PG is not None + return cls.DEFAULT_PG + + def size(self): + return self.pg_world_size + + def rank(self): + return self.sim_rank + + # ---------------------------------------------------- + # P2P + # + # P2P operations are simulated as all_reduce + # ---------------------------------------------------- + class P2PRequestObject: + """ Dummy p2p request object that is returned for p2p ops""" + + def __init__(self, src): + self.src = src + + def wait(self): + return + + def is_completed(self): + return True + + def _source_rank(self): + return self.src + + def _p2p_op(self, tensor_list, src=None): + opts = AllreduceOptions() + if self.torch_ver_minor > 10: + opts.reduceOp = ReduceOp.SUM + self.pg.allreduce(tensor_list, opts).wait() + src = src if src is not None else self.sim_rank + return SimProcessGroup.P2PRequestObject(src=src) + + def send(self, tensor_list, _group_dst_rank, _tag): + return self._p2p_op(tensor_list) + + def recv_anysource(self, tensor_list, _tag): + return self._p2p_op(tensor_list) + + def recv(self, tensor_list, src, _tag): + return self._p2p_op(tensor_list, src=src) + + # ---------------------------------------------------- + # Collectives + # + # For some collectives, it is required to shrink the + # input/output tensors_list to 1-element (world_size=1). + # also, need to make all other members of tensors_list to depend + # on the first element - to prevent incorrect graph signaling. 
+ # The logic of shrink and then copy is handled by: + # - _adjust_tensors_list_to_ws1 + # - _copy_data_from_tensor_to_tensor_list + # ---------------------------------------------------- + @staticmethod + def _to_device(tensors, device): + if isinstance(tensors, dict): + return {k: SimProcessGroup._to_device(v, device) for k, v in tensors.items()} + elif isinstance(tensors, list): + return [SimProcessGroup._to_device(v, device) for v in tensors] + elif isinstance(tensors, torch.Tensor): + return tensors.to(device) + else: + assert False, 'Unsupported tensors type' + + def broadcast(self, tensors, opts): + """ ignore opts.rootRank and override to be the source """ + opts.rootRank = self.sim_rank + tensors = self._to_device(tensors, get_accelerator().current_device_name()) + return self.pg.broadcast(tensors, opts) + + def allreduce(self, tensors, opts): + return self.pg.allreduce(tensors, opts) + + def allreduce_coalesced(self, tensors, opts): + return self.pg.allreduce_coalesced(tensors, opts) + + def reduce(self, tensors, opts): + if opts.rootRank == self.sim_rank: + return self.pg.reduce(tensors, opts) + + broadcast_opts = BroadcastOptions() + broadcast_opts.rootRank = self.sim_rank + broadcast_opts.rootTensor = opts.rootTensor + return self.pg.broadcast(tensors, broadcast_opts) + + def _adjust_tensors_list_to_ws1(self, tensors_list): + """ receives list of lists of tensors and returns lists + of list-size-1 to match the world_size=1 + """ + world1_tensors_list = [] + for i, tensors in enumerate(tensors_list): + world1_tensors_list.append(tensors[self.sim_rank:self.sim_rank + 1]) + return world1_tensors_list + + @staticmethod + def _copy_data_from_tensor_to_tensor_list(source_tensors, tensors_list): + """ copy data from source tensors to all tensors in tensor list """ + for i, tensors in enumerate(tensors_list): + for t in tensors: + t.data[:] = source_tensors[i][0].data[:] + + def allgather(self, tensors_list, input_tensors, *kwargs): + world1_tensors_list = self._adjust_tensors_list_to_ws1(tensors_list) + handle = self.pg.allgather(world1_tensors_list, input_tensors, *kwargs) + self._copy_data_from_tensor_to_tensor_list(world1_tensors_list, tensors_list) + return handle + + def gather(self, output_tensors, input_tensors, opts): + if opts.rootRank == self.sim_rank: + world1_tensors_list = self._adjust_tensors_list_to_ws1(output_tensors) + handle = self.pg.gather(world1_tensors_list, input_tensors, opts) + self._copy_data_from_tensor_to_tensor_list(world1_tensors_list, output_tensors) + return handle + + broadcast_opts = BroadcastOptions() + broadcast_opts.rootRank = self.sim_rank + return self.pg.broadcast(input_tensors, broadcast_opts) + + def scatter(self, output_tensors, input_tensors, opts): + if opts.rootRank == self.sim_rank: + world1_tensors_list = self._adjust_tensors_list_to_ws1(input_tensors) + handle = self.pg.scatter(output_tensors, world1_tensors_list, opts) + self._copy_data_from_tensor_to_tensor_list(world1_tensors_list, input_tensors) + return handle + + broadcast_opts = BroadcastOptions() + broadcast_opts.rootRank = self.sim_rank + return self.pg.broadcast(output_tensors, broadcast_opts) + + def reduce_scatter(self, output_tensors, input_tensors, opts): + world1_tensors_list = self._adjust_tensors_list_to_ws1(input_tensors) + handle = self.pg.reduce_scatter(output_tensors, world1_tensors_list, opts) + self._copy_data_from_tensor_to_tensor_list(world1_tensors_list, input_tensors) + return handle + + def alltoall(self, output_tensors, input_tensors, _opts): + 
world1_in_tensors_list = input_tensors[self.sim_rank:self.sim_rank + 1] + world1_out_tensors_list = output_tensors[self.sim_rank:self.sim_rank + 1] + world1_out_tensors_list[0].data[:] = world1_in_tensors_list[0].data[:] + opts = AllreduceOptions() + if self.torch_ver_minor > 10: + opts.reduceOp = ReduceOp.SUM + handle = self.pg.allreduce(world1_out_tensors_list, opts) + return handle + + def barrier(self, opts): + opts.device_ids = [self.sim_rank] + return self.pg.barrier(opts) + + # ---------------------------------------------------- + # Create group registered function + # ---------------------------------------------------- + @classmethod + def create(cls, _store, rank, world_size, timeout, backend): + return cls(rank, world_size, timeout, backend) + + +def install_sim_dist_backend(sim_world_size, sim_rank): + + def wrapped_dist_init_process_group(backend, + init_method=None, + timeout=default_pg_timeout, + world_size=-1, + rank=-1, + store=None, + group_name="", + pg_options=None): + assert world_size == -1 or world_size == sim_world_size, \ + f'Inconsistent world_size: sim={sim_world_size} dist_init={world_size}' + + assert rank == -1 or rank == sim_rank, \ + f'Inconsistent rank: sim={sim_rank} dist_init={rank}' + + if backend == 'hccl': + import habana_frameworks.torch.distributed.hccl # noqa: F401 + + # override provided init_method/store with a dummy store + # For debug, it is better to use FileStore: + # import os + # my_store_filename = '/tmp/my_store' + # os.remove(my_store_filename) if os.path.exists(my_store_filename) else None + # os.remove(my_store_filename) + # store = torch.distributed.FileStore(my_store_filename, world_size) + store = torch.distributed.TCPStore(host_name="localhost", + port=12355, + world_size=sim_world_size, + is_master=True, + timeout=timedelta(seconds=300), + wait_for_workers=False) + + # set the simulated world size + SimProcessGroup.WORLD_SIZE = sim_world_size + SimProcessGroup.STORE = store + + # register sim backend + # create_fn = partial(SimProcessGroup.create, backend=default_backend) + create_fn = partial(SimProcessGroup.create, backend=backend) + dist.Backend.register_backend(SimProcessGroup.BACKEND, create_fn) + + # emulate all other world devices has joined the newly created group + SimProcessGroup.store_add_rest_of_world(next_group=True) + + orig_dist_init_process_group(backend=SimProcessGroup.BACKEND, + timeout=timeout, + world_size=sim_world_size, + rank=sim_rank, + store=store, + group_name=group_name, + pg_options=pg_options) + + SimProcessGroup.default_pg().post_create_sim_group() + + def wrapped_dist_new_group(ranks=None, timeout=default_pg_timeout, backend=None, pg_options=None): + SimProcessGroup.store_add_rest_of_world(next_group=True) + pg = orig_dist_new_group(ranks=ranks, timeout=timeout, backend=backend, pg_options=pg_options) + + if pg != GroupMember.NON_GROUP_MEMBER: + if backend is None or backend == SimProcessGroup.BACKEND: + pg.post_create_sim_group() + + return pg + + def wrapped_dist_broadcast_object_list(object_list, src=0, group=None, device=None): + rank = SimProcessGroup.default_pg().sim_rank + if src != sim_rank: + raise RuntimeError(f'SimProcessGroup does not support dist.broadcast_object_list() ' + f'for src={src} different than sim_rank={rank}') + return orig_dist_broadcast_object_list(object_list, src, group, device) + + orig_dist_init_process_group = dist.init_process_group + dist.init_process_group = wrapped_dist_init_process_group + + orig_dist_new_group = dist.new_group + dist.new_group = 
wrapped_dist_new_group + + orig_dist_broadcast_object_list = dist.broadcast_object_list + dist.broadcast_object_list = wrapped_dist_broadcast_object_list diff --git a/deepspeed/tools/pg_sim/ut/base.py b/deepspeed/tools/pg_sim/ut/base.py new file mode 100644 index 000000000000..24889f944070 --- /dev/null +++ b/deepspeed/tools/pg_sim/ut/base.py @@ -0,0 +1,311 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import unittest +import functools +import torch +import torch.distributed as dist +import pytest + +from pg_sim.pg import (install_sim_dist_backend, GroupMember) + + +class TestBaseWrapper: + """ + BaseTestWrapper class ensures that the test cases encapsulated + in ProcessGroupSimTestBase will only be executed by subclasses. + """ + + class ProcessGroupSimTestBase(unittest.TestCase): + + def setUp(self) -> None: + self.world_size = 8 + self.rank = 0 + self.backend = self.get_backend() + self.device = self.get_device() + + self.assertIsNotNone(self.backend) + self.assertIsNotNone(self.device) + + install_sim_dist_backend(sim_world_size=self.world_size, sim_rank=self.rank) + + dist.init_process_group(backend=self.backend, + init_method=None, + store=None, + rank=self.rank, + world_size=self.world_size) + + def get_backend(self): + self.assertTrue(False, msg='get_backend must be implemented by derived test') + + def get_device(self): + self.assertTrue(False, msg='get_device must be implemented by derived test') + + def _get_row_first_rank(self): + row_ranks = list(set(range(self.world_size)) - {self.rank}) + return row_ranks[0] if row_ranks else None + + @staticmethod + def _get_torch_version(): + return int(torch.__version__.split('.')[1]) + + @pytest.mark.forked + def test_world(self): + res_rank = dist.get_rank() + res_ws = dist.get_world_size() + self.assertEqual(res_rank, self.rank) + self.assertEqual(res_ws, self.world_size) + + @pytest.mark.forked + def test_new_group(self): + t = torch.tensor([1, 2]).to(self.device) + t_in_out = t.clone() + + pg_1 = dist.new_group(ranks=[self.rank]) + dist.all_reduce(t_in_out, op=dist.ReduceOp.SUM, group=pg_1) + self.assertTrue(t.eq(t_in_out).all()) + + row_rank = self._get_row_first_rank() + if row_rank: + pg_2 = dist.new_group(ranks=[row_rank]) + self.assertEqual(pg_2, GroupMember.NON_GROUP_MEMBER) + else: + self.skipTest(f'Skipping {self._testMethodName}') + + def _test_broadcast_impl(self, src): + t = torch.tensor([1, 2]).to(self.device) + handle = dist.broadcast(t, src=src, async_op=False) + self.assertIsNone(handle) + + t = torch.tensor([1, 2]).to(self.device) + handle = dist.broadcast(t, src=src, async_op=True) + self.assertIsNotNone(handle) + handle.wait() + + @pytest.mark.forked + def test_broadcast_src(self): + self._test_broadcast_impl(src=self.rank) + + @pytest.mark.forked + def test_broadcast_dst(self): + row_rank = self._get_row_first_rank() + if row_rank: + self._test_broadcast_impl(src=row_rank) + else: + self.skipTest(f'Skipping {self._testMethodName}') + + def _test_broadcast_object_type_impl(self, src): + if dist.get_rank() == src: + objects = ["foo", 12, {1: 2}] + else: + objects = [None, None, None] + + dev = torch.device(self.device) + dist.broadcast_object_list(objects, src=src, device=dev) + + @pytest.mark.forked + def test_broadcast_object_type_src(self): + self._test_broadcast_object_type_impl(src=self.rank) + + @pytest.mark.forked + def test_broadcast_object_type_dst(self): + row_rank = self._get_row_first_rank() 
+ if row_rank: + with pytest.raises(RuntimeError): + self._test_broadcast_object_type_impl(src=row_rank) + else: + self.skipTest(f'Skipping {self._testMethodName}') + + @pytest.mark.forked + def test_all_reduce(self): + t = torch.tensor([1, 2]).to(self.device) + t_in_out = t.clone() + dist.all_reduce(t_in_out, op=dist.ReduceOp.SUM) + self.assertTrue(t.eq(t_in_out).all()) + + def _test_reduce_impl(self, dst): + t = torch.tensor([1.0, 2.0]).to(self.device) + t_in_out = t.clone() + + handle = dist.reduce(t_in_out, dst=dst, op=dist.ReduceOp.SUM, async_op=False) + self.assertIsNone(handle) + self.assertTrue(t.eq(t_in_out).all()) + + handle = dist.reduce(t_in_out, dst=dst, op=dist.ReduceOp.SUM, async_op=True) + self.assertIsNotNone(handle) + handle.wait() + self.assertTrue(t.eq(t_in_out).all()) + + @pytest.mark.forked + def test_reduce_src(self): + self._test_reduce_impl(dst=self.rank) + + @pytest.mark.forked + def test_reduce_dst(self): + row_rank = self._get_row_first_rank() + if row_rank: + self._test_reduce_impl(dst=row_rank) + else: + self.skipTest(f'Skipping {self._testMethodName}') + + @pytest.mark.forked + def test_all_gather(self): + tensor_list = [torch.zeros(2).to(self.device) for _ in range(self.world_size)] + tensor = torch.ones(2).to(self.device) + + handle = dist.all_gather(tensor_list, tensor, async_op=False) + self.assertIsNone(handle) + self.assertTrue(tensor_list[0].eq(tensor).all()) + + handle = dist.all_gather(tensor_list, tensor, async_op=True) + self.assertIsNotNone(handle) + handle.wait() + self.assertTrue(tensor_list[0].eq(tensor).all()) + + def _test_gather_impl(self, dst, local_dst): + torch_version = self._get_torch_version() + if (self.backend == 'nccl') and (torch_version <= 10): + self.skipTest(f'Skipping {self._testMethodName} for nccl ' + f'for torch.version={torch_version}') + + tensor = torch.ones(2).to(self.device) + gather_list = [torch.zeros(2).to(self.device) for _ in range(self.world_size)] if local_dst else None + + handle = dist.gather(tensor, gather_list, dst=dst, async_op=False) + self.assertIsNone(handle) + if local_dst: + self.assertTrue(gather_list[dst].eq(tensor).all()) + + handle = dist.gather(tensor, gather_list, dst=dst, async_op=True) + self.assertIsNotNone(handle) + handle.wait() + if local_dst: + self.assertTrue(gather_list[dst].eq(tensor).all()) + + @pytest.mark.forked + def test_gather_src(self): + self._test_gather_impl(dst=self.rank, local_dst=True) + + @pytest.mark.forked + def test_gather_not_src(self): + row_rank = self._get_row_first_rank() + if row_rank: + self._test_gather_impl(dst=row_rank, local_dst=False) + else: + self.skipTest(f'Skipping {self._testMethodName}') + + def _test_scatter_impl(self, src, local_src): + if self.backend not in ('gloo', 'mpi'): + self.skipTest(f'Skipping {self._testMethodName} for {self.backend}') + + tensor = torch.ones(2).to(self.device) + scatter_list = [torch.zeros(2).to(self.device) for _ in range(self.world_size)] if local_src else None + + handle = dist.scatter(tensor, scatter_list, src=src, async_op=False) + self.assertIsNone(handle) + if local_src: + self.assertTrue(scatter_list[src].eq(tensor).all()) + + handle = dist.scatter(tensor, scatter_list, src=src, async_op=True) + self.assertIsNotNone(handle) + handle.wait() + if local_src: + self.assertTrue(scatter_list[src].eq(tensor).all()) + + @pytest.mark.forked + def test_scatter_src(self): + self._test_scatter_impl(src=self.rank, local_src=True) + + @pytest.mark.forked + def test_scatter_not_src(self): + row_rank = self._get_row_first_rank() + 
if row_rank: + self._test_scatter_impl(src=row_rank, local_src=False) + else: + self.skipTest(f'Skipping {self._testMethodName}') + + @pytest.mark.forked + def test_reduce_scatter(self): + if self.backend not in ('nccl', 'hccl'): + self.skipTest(f'Skipping {self._testMethodName} for {self.backend}') + + output = torch.ones(2).to(self.device) + input_list = [torch.zeros(2).to(self.device) for _ in range(self.world_size)] + + handle = dist.reduce_scatter(output, input_list, async_op=False) + self.assertIsNone(handle) + self.assertTrue(input_list[self.rank].eq(output).all()) + + handle = dist.reduce_scatter(output, input_list, async_op=True) + self.assertIsNotNone(handle) + handle.wait() + self.assertTrue(input_list[self.rank].eq(output).all()) + + @pytest.mark.forked + def test_all_to_all(self): + if self.backend not in ('nccl', 'hccl', 'mpi'): + self.skipTest(f'Skipping {self._testMethodName} for {self.backend}') + + output_list = [torch.zeros(1).to(self.device) for _ in range(self.world_size)] + input_list = list( + torch.arange(self.world_size, dtype=torch.float32).add(1.).to(self.device).chunk(self.world_size)) + + expected_res = [ + torch.zeros(1).to(self.device) if i != self.rank else torch.ones(1).to(self.device) + for i in range(self.world_size) + ] + + handle = dist.all_to_all(output_list, input_list, async_op=False) + self.assertIsNone(handle) + self.assertTrue( + functools.reduce(lambda x, y: x and y, map(lambda p, q: p == q, expected_res, output_list), True)) + + handle = dist.all_to_all(output_list, input_list, async_op=True) + self.assertIsNotNone(handle) + handle.wait() + self.assertTrue( + functools.reduce(lambda x, y: x and y, map(lambda p, q: p == q, expected_res, output_list), True)) + + @pytest.mark.forked + def test_barrier(self): + handle = dist.barrier(async_op=False) + self.assertIsNone(handle) + + handle = dist.barrier(async_op=True) + self.assertIsNotNone(handle) + handle.wait() + + @pytest.mark.forked + def test_p2p_send(self): + tensor = torch.ones(2).to(self.device) + dist.send(tensor, dst=self.rank, group=None, tag=0) + + row_rank = self._get_row_first_rank() + dist.send(tensor, dst=row_rank, group=None, tag=0) if row_rank else None + + handle = dist.isend(tensor, dst=self.rank, group=None, tag=0) + self.assertIsNotNone(handle) + handle.wait() + + handle = dist.isend(tensor, dst=row_rank, group=None, tag=0) + self.assertIsNotNone(handle) + handle.wait() + + @pytest.mark.forked + def test_p2p_recv(self): + tensor = torch.zeros(2).to(self.device) + dist.recv(tensor, src=self.rank, group=None, tag=0) + + row_rank = self._get_row_first_rank() + dist.recv(tensor, src=row_rank, group=None, tag=0) if row_rank else None + + handle = dist.irecv(tensor, src=self.rank, group=None, tag=0) + self.assertIsNotNone(handle) + handle.wait() + + handle = dist.irecv(tensor, src=row_rank, group=None, tag=0) + self.assertIsNotNone(handle) + handle.wait() diff --git a/deepspeed/tools/pg_sim/ut/gloo.py b/deepspeed/tools/pg_sim/ut/gloo.py new file mode 100644 index 000000000000..347f9b96a741 --- /dev/null +++ b/deepspeed/tools/pg_sim/ut/gloo.py @@ -0,0 +1,24 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. 
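For orientation, the simulator added above is enabled by installing wrapped torch.distributed entry points before init_process_group is called, which is exactly what ProcessGroupSimTestBase.setUp() does; the gloo/hccl/nccl test files that follow only choose the backend and device. A minimal usage sketch with illustrative values (gloo on CPU, simulated world size 8, rank 0), not part of the patch:

import torch
import torch.distributed as dist
from deepspeed.tools.pg_sim import install_sim_dist_backend

# Install the simulation wrappers first; afterwards the regular init call is
# redirected to the single-process "sim" backend.
install_sim_dist_backend(sim_world_size=8, sim_rank=0)
dist.init_process_group(backend='gloo', rank=0, world_size=8)

# The process reports the simulated topology while collectives actually run
# against a world_size=1 group.
assert dist.get_world_size() == 8 and dist.get_rank() == 0
dist.all_reduce(torch.ones(2))
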
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import unittest +from pg_sim.ut.base import TestBaseWrapper + + +class ProcessGroupSimTestGloo(TestBaseWrapper.ProcessGroupSimTestBase): + + def setUp(self) -> None: + super(ProcessGroupSimTestGloo, self).setUp() + + def get_backend(self): + return 'gloo' + + def get_device(self): + return 'cpu' + + +if __name__ == '__main__': + unittest.main() diff --git a/deepspeed/tools/pg_sim/ut/hccl.py b/deepspeed/tools/pg_sim/ut/hccl.py new file mode 100644 index 000000000000..37fabcfb9363 --- /dev/null +++ b/deepspeed/tools/pg_sim/ut/hccl.py @@ -0,0 +1,24 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import unittest +from pg_sim.ut.base import TestBaseWrapper + + +class ProcessGroupSimTestHccl(TestBaseWrapper.ProcessGroupSimTestBase): + + def setUp(self) -> None: + super(ProcessGroupSimTestHccl, self).setUp() + + def get_backend(self): + return 'hccl' + + def get_device(self): + return 'hpu' + + +if __name__ == '__main__': + unittest.main() diff --git a/deepspeed/tools/pg_sim/ut/nccl.py b/deepspeed/tools/pg_sim/ut/nccl.py new file mode 100644 index 000000000000..da6b8f23ed47 --- /dev/null +++ b/deepspeed/tools/pg_sim/ut/nccl.py @@ -0,0 +1,24 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import unittest +from pg_sim.ut.base import TestBaseWrapper + + +class ProcessGroupSimTestNccl(TestBaseWrapper.ProcessGroupSimTestBase): + + def setUp(self) -> None: + super(ProcessGroupSimTestNccl, self).setUp() + + def get_backend(self): + return 'nccl' + + def get_device(self): + return 'cuda' + + +if __name__ == '__main__': + unittest.main() diff --git a/deepspeed/tools/tensor_logger/__init__.py b/deepspeed/tools/tensor_logger/__init__.py new file mode 100644 index 000000000000..55ea7227e48d --- /dev/null +++ b/deepspeed/tools/tensor_logger/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .tensor_logger import TensorLogger, save_logged_tensors diff --git a/deepspeed/tools/tensor_logger/tensor_logger.py b/deepspeed/tools/tensor_logger/tensor_logger.py new file mode 100644 index 000000000000..f128b9a6b0a0 --- /dev/null +++ b/deepspeed/tools/tensor_logger/tensor_logger.py @@ -0,0 +1,234 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import collections +from functools import partial +from contextlib import contextmanager +from torch.functional import Tensor +from os import makedirs +from os.path import join + + +class TensorLogger: + """ Records nn.Module's activations and gradients tensors + + Description: + Records up to end_iteration(if 0, recording is disabled) from start_iteration. + If log_activations_enabled, nn.Module's activations are recorded during forward. + If log_grads_enabled, nn.Module's gradients are recorded during back propagation. + If log_inputs_enabled, model inputs are recorded. 
+ + Usage: + Integrated within the training loop: + tensor_logger = TensorLogger(model, start_iteration=2, end_iteration=2) + # dumps second iteration only, iteration number starts with 1 + + for i, samples in enumerate(data_loader) # training loop + with tensor_logger.log_iteration(i): + # run forward/backward iteration + + tensor_logger.save(filename) + + Another alternative: + tensor_logger = TensorLogger(model, end_iteration=2) + + for i, samples in enumerate(data_loader) # training loop + with tensor_logger: + tensor_logger.set_iteration(i) + # run forward/backward iteration + + tensor_logger.save(filename) + + Implementation notes: + forward/backward activations/gradients are collected using nn.Module hooks. + However, model inputs are collected by overloading model.forward() method. + Model inputs can't be collected using the hooks since the hooks only provide + inputs and do not provide kwargs, if exist, of the forward method. + """ + + def __init__(self, + model, + start_iteration=0, + end_iteration=0, + log_activations_enabled=False, + log_grads_enabled=False, + log_inputs_enabled=False, + prefix=None): + + # for now, no support for virtual pipeline (interleaved) + if isinstance(model, list): + assert len(model) == 1, 'No support for list of multiple models (len={})'.format(len(model)) + model = model[0] + + self.model = model + self.start_iteration = start_iteration + self.end_iteration = end_iteration + self.log_activations_enabled = log_activations_enabled + self.log_grads_enabled = log_grads_enabled + self.log_inputs_enabled = log_inputs_enabled + self.prefix = 'model' if prefix is None else prefix + + # captured tensors are saved in the following hierarchy: + # { + # iteration: { # iteration number + # tensor_type: { # fwd_act/bwd_grad_in/bwd_grad_out + # name: [tensors] # tensor name's tensors. list is required due to e.g. 
grad accumulation + # } + # } + # } + class IterData(dict): + + def __init__(self): + super(IterData, self).__init__() + self['fwd_act'] = collections.defaultdict(list) + self['bwd_grad_in'] = collections.defaultdict(list) + self['bwd_grad_out'] = collections.defaultdict(list) + self['model_inputs'] = collections.defaultdict(list) + + self.data = collections.defaultdict(IterData) + self.active = False + self.current_iteration = 0 + self.fwd_handles = [] + self.bwd_handles = [] + + def _fqn(self, name): + return '.'.join([self.prefix, name]) if name else self.prefix + + def set_iteration(self, iteration): + self.current_iteration = iteration + + def get_num_recorded_iterations(self): + return len(self.data) + + @contextmanager + def log_iteration(self, iteration): + self.current_iteration = iteration + self._enable() + yield self + self._disable() + + def __enter__(self): + self._enable() + return self + + def __exit__(self): + self._disable() + + def clear(self): + self.data.clear() + + def save(self, filename, do_clear=True): + + def convert_for_pickle(obj): + if isinstance(obj, dict): + return {k: convert_for_pickle(v) for k, v in obj.items()} + elif isinstance(obj, list): + return [convert_for_pickle(e) for e in obj] + elif isinstance(obj, tuple): + return tuple([convert_for_pickle(e) for e in obj]) + else: + if isinstance(obj, Tensor): + return obj.detach().cpu() + else: + return obj + + data = convert_for_pickle(self.data) + torch.save(data, filename) + self.clear() if do_clear else None + + def _enable(self): + if not self.active and self.start_iteration <= self.current_iteration <= self.end_iteration: + self.active = True + self._enable_log_grads() if self.log_grads_enabled else None + self._enable_log_activations() if self.log_activations_enabled else None + self._enable_log_inputs() if self.log_inputs_enabled else None + + def _disable(self): + if self.active: + self.active = False + self._disable_log_grads() + self._disable_log_activations() + self._disable_log_inputs() + + @staticmethod + def _extract_tensors(t): + if t is None: + return None + elif isinstance(t, int): + return torch.tensor(t) + elif isinstance(t, torch.Tensor): + return t.detach().contiguous() + elif isinstance(t, list): + return [TensorLogger._extract_tensors(e) for e in t] + elif isinstance(t, tuple): + return tuple(TensorLogger._extract_tensors(e) for e in t) + elif isinstance(t, dict): + return {k: TensorLogger._extract_tensors(v) for k, v in t.items()} + assert False, 'Unsupported type: {}'.format(type(t)) + + def _save_fwd_activation(self, name, _mod, _inp, out): + fwd_act = self._extract_tensors(out) + self.data[self.current_iteration]['fwd_act'][name].append(fwd_act) + + def _save_bwd_grads(self, name, _mod, grad_input, grad_output): + grad_in = self._extract_tensors(grad_input) + grad_out = self._extract_tensors(grad_output) + self.data[self.current_iteration]['bwd_grad_in'][name].append(grad_in) + self.data[self.current_iteration]['bwd_grad_out'][name].append(grad_out) + + def _save_inputs(self, *inp, **kwargs): + model_inputs = self._extract_tensors(inp) + model_kwargs = self._extract_tensors(kwargs) + self.data[self.current_iteration]['model_inputs']['inputs'].append(model_inputs) + self.data[self.current_iteration]['model_inputs']['kwargs'].append(model_kwargs) + + def _enable_log_grads(self): + #Revert after [SW-69765] is fixed + full_bwd_hook_supported = False + for name, m in self.model.named_modules(): + register_fn = m.register_full_backward_hook if full_bwd_hook_supported else 
m.register_backward_hook + h = register_fn(partial(self._save_bwd_grads, self._fqn(name))) + self.bwd_handles.append(h) + + def _enable_log_activations(self): + for name, m in self.model.named_modules(): + h = m.register_forward_hook(partial(self._save_fwd_activation, self._fqn(name))) + self.fwd_handles.append(h) + + def _enable_log_inputs(self): + + def wrapped_forward(*inputs, **kwargs): + self._save_inputs(*inputs, **kwargs) + return self.model.original_forward__(*inputs, **kwargs) + + self.model.original_forward__ = self.model.forward + self.model.forward = wrapped_forward + + def _disable_log_grads(self): + for h in self.bwd_handles: + h.remove() + self.bwd_handles = [] + + def _disable_log_activations(self): + for h in self.fwd_handles: + h.remove() + self.fwd_handles = [] + + def _disable_log_inputs(self): + if hasattr(self.model, 'original_forward__'): + self.model.forward = self.model.original_forward__ + del self.model.original_forward__ + + +def save_logged_tensors(tensor_logger: TensorLogger, tensor_logger_path, rank_no, iteration=None): + if tensor_logger.get_num_recorded_iterations(): + makedirs(tensor_logger_path, exist_ok=True) + filename = 'tensor_logger_rank_{}'.format(rank_no) + '.pt' + if iteration is not None: + filename = 'tensor_logger_rank_{}_iter_{}'.format(rank_no, iteration) + '.pt' + fullname = join(tensor_logger_path, filename) + tensor_logger.save(fullname) diff --git a/deepspeed/utils/groups.py b/deepspeed/utils/groups.py index c49f4520e16e..381c1f895652 100644 --- a/deepspeed/utils/groups.py +++ b/deepspeed/utils/groups.py @@ -34,6 +34,8 @@ _EXPERT_PARALLEL_GROUP = {} # Expert data parallel group that the current rank belongs to. _EXPERT_DATA_PARALLEL_GROUP = {} +# Expert Parallel combined with Tensor Parallel that the current rank belongs to. +_EXPERT_TENSOR_PARALLEL_GROUP = {} # dist world group needs to be cloned for some cases _WORLD_GROUP = None # ZeRO parameter partitioning group that the current rank belongs to. @@ -251,7 +253,36 @@ def _get_expert_parallel_ranks(world_size, return expert_parallel_groups, expert_data_parallel_groups -def _create_expert_data_and_model_parallel(expert_parallel_size_, mpu, use_data_before_expert_parallel_=False): +def _get_expert_tensor_parallel_ranks(expert_parallel_groups): + # create a dict from each rank to the ep_group ranks it belongs to + rank_to_ep_group = {} + for ranks in expert_parallel_groups: + for rank in ranks: + rank_to_ep_group[rank] = ranks + + # go over all tensor groups, rank by rank + # for each rank, add the ep_ranks to current tensor group, if not already added + # in order to add ep ranks only once, we delete all rank members from rank_to_ep_group + global expert_tensor_parallel_world_size + world_size = dist.get_world_size() + expert_tensor_parallel_groups = [] + for i in range(world_size // expert_tensor_parallel_world_size): + ep_tp_ranks = [] + for t in range(expert_tensor_parallel_world_size): + rank = i * expert_tensor_parallel_world_size + t + ep_ranks = rank_to_ep_group.get(rank, []) + for r in ep_ranks: + rank_to_ep_group.pop(r) + ep_tp_ranks.extend(ep_ranks) + if ep_tp_ranks: + expert_tensor_parallel_groups.append(sorted(ep_tp_ranks)) + return expert_tensor_parallel_groups + + +def _create_expert_data_and_model_parallel(expert_parallel_size_, + mpu, + use_data_before_expert_parallel_=False, + create_expert_tensor_parallel_group=False): """ Create expert and data parallel groups based on MPU (model parallel) group. 
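A small, self-contained sketch of the grouping performed by the new _get_expert_tensor_parallel_ranks() helper above may make it, and the group creation in the next hunk, easier to follow. The sizes are illustrative assumptions (world size 4, expert_tensor_parallel_world_size 2, i.e. a tensor-parallel width of 2, and two expert-parallel groups); none of them come from the patch:

def _toy_expert_tensor_parallel_ranks(world_size, tp_size, expert_parallel_groups):
    # Mirror of the helper's logic: walk the ranks in tensor-parallel blocks and
    # union the expert-parallel groups of each block's members, consuming every
    # expert-parallel group exactly once.
    rank_to_ep_group = {r: ranks for ranks in expert_parallel_groups for r in ranks}
    groups = []
    for i in range(world_size // tp_size):
        ep_tp_ranks = []
        for t in range(tp_size):
            ep_ranks = rank_to_ep_group.get(i * tp_size + t, [])
            for r in ep_ranks:
                rank_to_ep_group.pop(r)
            ep_tp_ranks.extend(ep_ranks)
        if ep_tp_ranks:
            groups.append(sorted(ep_tp_ranks))
    return groups

# Block {0, 1} pulls in [0, 2] and [1, 3]; block {2, 3} finds its groups already
# consumed, so a single combined group remains.
assert _toy_expert_tensor_parallel_ranks(4, 2, [[0, 2], [1, 3]]) == [[0, 1, 2, 3]]
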
@@ -304,6 +335,18 @@ def _create_expert_data_and_model_parallel(expert_parallel_size_, mpu, use_data_ if rank in list(ranks): _EXPERT_DATA_PARALLEL_GROUP[group_name] = group + if create_expert_tensor_parallel_group: + # calculate ep_tp_groups and validate correct number of groups + expert_tensor_parallel_groups = _get_expert_tensor_parallel_ranks(expert_parallel_groups) + n_ep_tp_groups = world_size // expert_parallel_size_ // expert_tensor_parallel_world_size + assert n_ep_tp_groups == len(expert_tensor_parallel_groups) + + # create groups + for ranks in expert_tensor_parallel_groups: + group = dist.new_group(ranks) + if rank in list(ranks): + _EXPERT_TENSOR_PARALLEL_GROUP[group_name] = group + def _get_max_expert_size(): """Get the maximum ep_size from all the created groups.""" @@ -350,6 +393,18 @@ def _get_expert_data_parallel_group_dict(): return _EXPERT_DATA_PARALLEL_GROUP +def _get_expert_tensor_parallel_group(group_name): + """Get the expert tensor parallel group the caller rank belongs to.""" + assert group_name in _EXPERT_TENSOR_PARALLEL_GROUP, \ + f'expert tensor parallel group is not initialized for {group_name=}' + return _EXPERT_TENSOR_PARALLEL_GROUP[group_name] + + +def _get_expert_tensor_parallel_group_dict(): + """Get the expert tensor parallel group dict.""" + return _EXPERT_TENSOR_PARALLEL_GROUP + + def _clone_world_group(): """Create a clone of the world group Note: We need to clone the dist world group because we diff --git a/deepspeed/utils/logging.py b/deepspeed/utils/logging.py index 1e62d96e1032..97496f7db487 100644 --- a/deepspeed/utils/logging.py +++ b/deepspeed/utils/logging.py @@ -7,6 +7,7 @@ import logging import sys import os +import torch log_levels = { "debug": logging.DEBUG, @@ -19,6 +20,18 @@ class LoggerFactory: + @staticmethod + def logging_decorator(func): + + @functools.wraps(func) + def wrapper(*args, **kwargs): + if torch._dynamo.is_compiling(): + return + else: + return func(*args, **kwargs) + + return wrapper + @staticmethod def create_logger(name=None, level=logging.INFO): """create a logger @@ -44,6 +57,10 @@ def create_logger(name=None, level=logging.INFO): ch.setLevel(level) ch.setFormatter(formatter) logger_.addHandler(ch) + if os.getenv("DISABLE_LOGS_WHILE_COMPILING", "0") == "1": + for method in ['info', 'debug', 'error', 'warning', 'critical', 'exception']: + original_logger = getattr(logger_, method) + setattr(logger_, method, LoggerFactory.logging_decorator(original_logger)) return logger_ diff --git a/op_builder/cpu/builder.py b/op_builder/cpu/builder.py index d881842ad0b1..d2bc8eacfa25 100644 --- a/op_builder/cpu/builder.py +++ b/op_builder/cpu/builder.py @@ -30,11 +30,7 @@ def builder(self): return cpp_ext def cxx_args(self): - args = ['-O3', '-g', '-Wno-reorder'] - CPU_ARCH = self.cpu_arch() - SIMD_WIDTH = self.simd_width() - args += [CPU_ARCH, '-fopenmp', SIMD_WIDTH] - return args + return ['-O3', '-g', '-Wno-reorder'] def libraries_args(self): return [] diff --git a/op_builder/evoformer_attn.py b/op_builder/evoformer_attn.py index 6e7721f94e01..af3aa7429775 100644 --- a/op_builder/evoformer_attn.py +++ b/op_builder/evoformer_attn.py @@ -41,18 +41,21 @@ def nvcc_args(self): args.append(f"-DGPU_ARCH={major}{minor}") return args - def is_compatible(self, verbose=True): + def is_compatible(self, verbose=False): try: import torch except ImportError: - self.warning("Please install torch if trying to pre-compile kernels") + if verbose: + self.warning("Please install torch if trying to pre-compile kernels") return False if 
self.cutlass_path is None: - self.warning("Please specify the CUTLASS repo directory as environment variable $CUTLASS_PATH") + if verbose: + self.warning("Please specify the CUTLASS repo directory as environment variable $CUTLASS_PATH") return False with open(f'{self.cutlass_path}/CHANGELOG.md', 'r') as f: if '3.1.0' not in f.read(): - self.warning("Please use CUTLASS version >= 3.1.0") + if verbose: + self.warning("Please use CUTLASS version >= 3.1.0") return False cuda_okay = True if not self.is_rocm_pytorch() and torch.cuda.is_available(): #ignore-cuda @@ -60,10 +63,12 @@ def is_compatible(self, verbose=True): torch_cuda_major = int(torch.version.cuda.split('.')[0]) cuda_capability = torch.cuda.get_device_properties(0).major #ignore-cuda if cuda_capability < 7: - self.warning("Please use a GPU with compute capability >= 7.0") + if verbose: + self.warning("Please use a GPU with compute capability >= 7.0") cuda_okay = False if torch_cuda_major < 11 or sys_cuda_major < 11: - self.warning("Please use CUDA 11+") + if verbose: + self.warning("Please use CUDA 11+") cuda_okay = False return super().is_compatible(verbose) and cuda_okay diff --git a/op_builder/fp_quantizer.py b/op_builder/fp_quantizer.py index 9f07ec3d1275..75ee54c09bf6 100644 --- a/op_builder/fp_quantizer.py +++ b/op_builder/fp_quantizer.py @@ -3,6 +3,11 @@ # DeepSpeed Team +try: + from packaging import version as pkg_version +except ImportError: + pkg_version = None + from .builder import CUDAOpBuilder, installed_cuda_version @@ -17,11 +22,12 @@ def __init__(self, name=None): def absolute_name(self): return f'deepspeed.ops.fp_quantizer.{self.NAME}_op' - def is_compatible(self, verbose=True): + def is_compatible(self, verbose=False): try: import torch except ImportError: - self.warning("Please install torch if trying to pre-compile inference kernels") + if verbose: + self.warning("Please install torch if trying to pre-compile inference kernels") return False cuda_okay = True @@ -30,12 +36,43 @@ def is_compatible(self, verbose=True): torch_cuda_major = int(torch.version.cuda.split('.')[0]) cuda_capability = torch.cuda.get_device_properties(0).major #ignore-cuda if cuda_capability < 8: - self.warning("NVIDIA Inference is only supported on Ampere and newer architectures") + if verbose: + self.warning("NVIDIA Inference is only supported on Ampere and newer architectures") cuda_okay = False if cuda_capability >= 8: if torch_cuda_major < 11 or sys_cuda_major < 11: - self.warning("On Ampere and higher architectures please use CUDA 11+") + if verbose: + self.warning("On Ampere and higher architectures please use CUDA 11+") cuda_okay = False + + try: + import triton + except ImportError: + if verbose: + self.warning( + f"please install triton==2.3.0, 2.3.1 or 3.0.0 if you want to use the FP Quantizer Kernels") + return False + + # triton 2.3.{0,1} and 3.0.0 are ok. 
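The version gate that follows accepts triton 2.3.x and 3.0.x by comparing major and minor components, using packaging when it is available and plain string splitting otherwise. A condensed, standalone sketch of the same comparison (illustrative only, not part of the patch):

from packaging import version as pkg_version

def triton_version_ok(installed, allowed=("2.3", "3.0")):
    # True when the installed major.minor matches one of the allowed series.
    inst = pkg_version.parse(installed)
    return any(inst.major == a.major and inst.minor == a.minor
               for a in map(pkg_version.parse, allowed))

assert triton_version_ok("2.3.1") and triton_version_ok("3.0.0")
assert not triton_version_ok("2.2.0")
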
+ allowed_versions = ("2.3", "3.0") + if pkg_version: + allowed = (pkg_version.parse(v) for v in allowed_versions) + installed_triton = pkg_version.parse(triton.__version__) + triton_mismatch = all(installed_triton.major != a.major or installed_triton.minor != a.minor + for a in allowed) + else: + installed_triton = triton.__version__ + major, minor, _ = installed_triton.split(".") + allowed = (v.split(".") for v in allowed_versions) + triton_mismatch = all(major != v[0] or minor != v[1] for v in allowed) + + if triton_mismatch: + if verbose: + self.warning( + f"FP Quantizer is using an untested triton version ({installed_triton}), only 2.3.{0,1} and 3.0.0 are known to be compatible with these kernels" + ) + return False + return super().is_compatible(verbose) and cuda_okay def filter_ccs(self, ccs): diff --git a/op_builder/hpu/__init__.py b/op_builder/hpu/__init__.py index 6527ace087b5..5ad1b9a7f891 100644 --- a/op_builder/hpu/__init__.py +++ b/op_builder/hpu/__init__.py @@ -7,4 +7,5 @@ from .cpu_adam import CPUAdamBuilder from .fused_adam import FusedAdamBuilder +from .transformer_inference import InferenceBuilder from .no_impl import NotImplementedBuilder diff --git a/op_builder/hpu/builder.py b/op_builder/hpu/builder.py index c176a586ba49..3c86128fffd6 100644 --- a/op_builder/hpu/builder.py +++ b/op_builder/hpu/builder.py @@ -31,11 +31,7 @@ def builder(self): return cpp_ext def cxx_args(self): - args = ['-O3', '-g', '-Wno-reorder'] - CPU_ARCH = self.cpu_arch() - SIMD_WIDTH = self.simd_width() - args += [CPU_ARCH, '-fopenmp', SIMD_WIDTH] - return args + return ['-O3', '-g', '-Wno-reorder'] def libraries_args(self): return [] diff --git a/op_builder/hpu/fp_quantizer.py b/op_builder/hpu/fp_quantizer.py new file mode 100644 index 000000000000..84f31ae2efe0 --- /dev/null +++ b/op_builder/hpu/fp_quantizer.py @@ -0,0 +1,39 @@ +# Copyright (c) 2024 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 +import importlib + +# DeepSpeed Team + +try: + # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed + # if successful this also means we're doing a local install and not JIT compile path + from op_builder import __deepspeed__ # noqa: F401 # type: ignore + from op_builder.builder import OpBuilder +except ImportError: + from deepspeed.ops.op_builder.builder import OpBuilder + + +class FPQuantizerBuilder(OpBuilder): + BUILD_VAR = "DS_BUILD_FP_QUANTIZER" + NAME = "fp_quantizer" + + def __init__(self, name=None): + name = self.NAME if name is None else name + super().__init__(name=name) + + def absolute_name(self): + return f'deepspeed.ops.fp_quantizer.{self.NAME}_op' + + def sources(self): + return [] + + def load(self, verbose=True): + if self.name in __class__._loaded_ops: + return __class__._loaded_ops[self.name] + + from deepspeed.git_version_info import installed_ops # noqa: F401 + if installed_ops.get(self.name, False): + op_module = importlib.import_module(self.absolute_name()) + __class__._loaded_ops[self.name] = op_module + return op_module diff --git a/op_builder/hpu/transformer_inference.py b/op_builder/hpu/transformer_inference.py new file mode 100644 index 000000000000..e397c99200ec --- /dev/null +++ b/op_builder/hpu/transformer_inference.py @@ -0,0 +1,39 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 +import importlib + +# DeepSpeed Team + +try: + # is op_builder from deepspeed or a 3p version? 
this should only succeed if it's deepspeed + # if successful this also means we're doing a local install and not JIT compile path + from op_builder import __deepspeed__ # noqa: F401 + from op_builder.builder import OpBuilder +except ImportError: + from deepspeed.ops.op_builder.builder import OpBuilder + + +class InferenceBuilder(OpBuilder): + BUILD_VAR = "DS_BUILD_TRANSFORMER_INFERENCE" + NAME = "transformer_inference" + + def __init__(self, name=None): + name = self.NAME if name is None else name + super().__init__(name=self.NAME) + + def absolute_name(self): + return f"deepspeed.ops.transformer.inference.{self.NAME}_op" + + def sources(self): + return [] + + def load(self, verbose=True): + if self.name in __class__._loaded_ops: + return __class__._loaded_ops[self.name] + + from deepspeed.git_version_info import installed_ops # noqa: F401 + if installed_ops.get(self.name, False): + op_module = importlib.import_module(self.absolute_name()) + __class__._loaded_ops[self.name] = op_module + return op_module diff --git a/op_builder/inference_core_ops.py b/op_builder/inference_core_ops.py index d1957f39d9a8..45e8628e669f 100755 --- a/op_builder/inference_core_ops.py +++ b/op_builder/inference_core_ops.py @@ -23,7 +23,8 @@ def is_compatible(self, verbose=True): try: import torch except ImportError: - self.warning("Please install torch if trying to pre-compile inference kernels") + if verbose: + self.warning("Please install torch if trying to pre-compile inference kernels") return False cuda_okay = True @@ -32,11 +33,13 @@ def is_compatible(self, verbose=True): torch_cuda_major = int(torch.version.cuda.split('.')[0]) cuda_capability = torch.cuda.get_device_properties(0).major #ignore-cuda if cuda_capability < 6: - self.warning("NVIDIA Inference is only supported on Pascal and newer architectures") + if verbose: + self.warning("NVIDIA Inference is only supported on Pascal and newer architectures") cuda_okay = False if cuda_capability >= 8: if torch_cuda_major < 11 or sys_cuda_major < 11: - self.warning("On Ampere and higher architectures please use CUDA 11+") + if verbose: + self.warning("On Ampere and higher architectures please use CUDA 11+") cuda_okay = False return super().is_compatible(verbose) and cuda_okay diff --git a/op_builder/inference_cutlass_builder.py b/op_builder/inference_cutlass_builder.py index 51f7931d9435..fda6e74bbf6a 100644 --- a/op_builder/inference_cutlass_builder.py +++ b/op_builder/inference_cutlass_builder.py @@ -22,7 +22,8 @@ def is_compatible(self, verbose=True): try: import torch except ImportError: - self.warning("Please install torch if trying to pre-compile inference kernels") + if verbose: + self.warning("Please install torch if trying to pre-compile inference kernels") return False cuda_okay = True @@ -31,11 +32,13 @@ def is_compatible(self, verbose=True): torch_cuda_major = int(torch.version.cuda.split('.')[0]) cuda_capability = torch.cuda.get_device_properties(0).major #ignore-cuda if cuda_capability < 6: - self.warning("NVIDIA Inference is only supported on Pascal and newer architectures") + if verbose: + self.warning("NVIDIA Inference is only supported on Pascal and newer architectures") cuda_okay = False if cuda_capability >= 8: if torch_cuda_major < 11 or sys_cuda_major < 11: - self.warning("On Ampere and higher architectures please use CUDA 11+") + if verbose: + self.warning("On Ampere and higher architectures please use CUDA 11+") cuda_okay = False return super().is_compatible(verbose) and cuda_okay diff --git a/op_builder/ragged_ops.py 
b/op_builder/ragged_ops.py index ec7cab91885f..a4e365786a2b 100644 --- a/op_builder/ragged_ops.py +++ b/op_builder/ragged_ops.py @@ -23,7 +23,8 @@ def is_compatible(self, verbose=True): try: import torch except ImportError: - self.warning("Please install torch if trying to pre-compile inference kernels") + if verbose: + self.warning("Please install torch if trying to pre-compile inference kernels") return False cuda_okay = True @@ -32,11 +33,13 @@ def is_compatible(self, verbose=True): torch_cuda_major = int(torch.version.cuda.split('.')[0]) cuda_capability = torch.cuda.get_device_properties(0).major #ignore-cuda if cuda_capability < 6: - self.warning("NVIDIA Inference is only supported on Pascal and newer architectures") + if verbose: + self.warning("NVIDIA Inference is only supported on Pascal and newer architectures") cuda_okay = False if cuda_capability >= 8: if torch_cuda_major < 11 or sys_cuda_major < 11: - self.warning("On Ampere and higher architectures please use CUDA 11+") + if verbose: + self.warning("On Ampere and higher architectures please use CUDA 11+") cuda_okay = False return super().is_compatible(verbose) and cuda_okay diff --git a/op_builder/ragged_utils.py b/op_builder/ragged_utils.py index 89450e1fd30d..a855f072af8c 100755 --- a/op_builder/ragged_utils.py +++ b/op_builder/ragged_utils.py @@ -23,7 +23,8 @@ def is_compatible(self, verbose=True): try: import torch except ImportError: - self.warning("Please install torch if trying to pre-compile inference kernels") + if verbose: + self.warning("Please install torch if trying to pre-compile inference kernels") return False cuda_okay = True @@ -32,11 +33,13 @@ def is_compatible(self, verbose=True): torch_cuda_major = int(torch.version.cuda.split('.')[0]) cuda_capability = torch.cuda.get_device_properties(0).major #ignore-cuda if cuda_capability < 6: - self.warning("NVIDIA Inference is only supported on Pascal and newer architectures") + if verbose: + self.warning("NVIDIA Inference is only supported on Pascal and newer architectures") cuda_okay = False if cuda_capability >= 8: if torch_cuda_major < 11 or sys_cuda_major < 11: - self.warning("On Ampere and higher architectures please use CUDA 11+") + if verbose: + self.warning("On Ampere and higher architectures please use CUDA 11+") cuda_okay = False return super().is_compatible(verbose) and cuda_okay diff --git a/op_builder/sparse_attn.py b/op_builder/sparse_attn.py index 188d257ff4ef..2385adc8fe9c 100644 --- a/op_builder/sparse_attn.py +++ b/op_builder/sparse_attn.py @@ -27,45 +27,51 @@ def sources(self): def cxx_args(self): return ['-O2', '-fopenmp'] - def is_compatible(self, verbose=True): + def is_compatible(self, verbose=False): # Check to see if llvm and cmake are installed since they are dependencies #required_commands = ['llvm-config|llvm-config-9', 'cmake'] #command_status = list(map(self.command_exists, required_commands)) #deps_compatible = all(command_status) if self.is_rocm_pytorch(): - self.warning(f'{self.NAME} is not compatible with ROCM') + if verbose: + self.warning(f'{self.NAME} is not compatible with ROCM') return False try: import torch except ImportError: - self.warning(f"unable to import torch, please install it first") + if verbose: + self.warning(f"unable to import torch, please install it first") return False # torch-cpu will not have a cuda version if torch.version.cuda is None: cuda_compatible = False - self.warning(f"{self.NAME} cuda is not available from torch") + if verbose: + self.warning(f"{self.NAME} cuda is not available from torch") else: 
major, minor = torch.version.cuda.split('.')[:2] cuda_compatible = (int(major) == 10 and int(minor) >= 1) or (int(major) >= 11) if not cuda_compatible: - self.warning(f"{self.NAME} requires CUDA version 10.1+") + if verbose: + self.warning(f"{self.NAME} requires CUDA version 10.1+") TORCH_MAJOR = int(torch.__version__.split('.')[0]) TORCH_MINOR = int(torch.__version__.split('.')[1]) torch_compatible = (TORCH_MAJOR == 1 and TORCH_MINOR >= 5) if not torch_compatible: - self.warning( - f'{self.NAME} requires a torch version >= 1.5 and < 2.0 but detected {TORCH_MAJOR}.{TORCH_MINOR}') + if verbose: + self.warning( + f'{self.NAME} requires a torch version >= 1.5 and < 2.0 but detected {TORCH_MAJOR}.{TORCH_MINOR}') try: import triton except ImportError: # auto-install of triton is broken on some systems, reverting to manual install for now # see this issue: https://github.com/microsoft/DeepSpeed/issues/1710 - self.warning(f"please install triton==1.0.0 if you want to use sparse attention") + if verbose: + self.warning(f"please install triton==1.0.0 if you want to use sparse attention") return False if pkg_version: @@ -76,7 +82,9 @@ def is_compatible(self, verbose=True): triton_mismatch = installed_triton != "1.0.0" if triton_mismatch: - self.warning(f"using untested triton version ({installed_triton}), only 1.0.0 is known to be compatible") + if verbose: + self.warning( + f"using untested triton version ({installed_triton}), only 1.0.0 is known to be compatible") return False return super().is_compatible(verbose) and torch_compatible and cuda_compatible diff --git a/op_builder/spatial_inference.py b/op_builder/spatial_inference.py index 59caf57f938d..8a6b36cce0b0 100644 --- a/op_builder/spatial_inference.py +++ b/op_builder/spatial_inference.py @@ -21,7 +21,8 @@ def is_compatible(self, verbose=True): try: import torch except ImportError: - self.warning("Please install torch if trying to pre-compile inference kernels") + if verbose: + self.warning("Please install torch if trying to pre-compile inference kernels") return False cuda_okay = True @@ -31,7 +32,8 @@ def is_compatible(self, verbose=True): cuda_capability = torch.cuda.get_device_properties(0).major if cuda_capability >= 8: if torch_cuda_major < 11 or sys_cuda_major < 11: - self.warning("On Ampere and higher architectures please use CUDA 11+") + if verbose: + self.warning("On Ampere and higher architectures please use CUDA 11+") cuda_okay = False return super().is_compatible(verbose) and cuda_okay diff --git a/op_builder/transformer_inference.py b/op_builder/transformer_inference.py index 5ee902289448..88b77499cc0e 100755 --- a/op_builder/transformer_inference.py +++ b/op_builder/transformer_inference.py @@ -21,7 +21,8 @@ def is_compatible(self, verbose=True): try: import torch except ImportError: - self.warning("Please install torch if trying to pre-compile inference kernels") + if verbose: + self.warning("Please install torch if trying to pre-compile inference kernels") return False cuda_okay = True @@ -30,11 +31,13 @@ def is_compatible(self, verbose=True): torch_cuda_major = int(torch.version.cuda.split('.')[0]) cuda_capability = torch.cuda.get_device_properties(0).major if cuda_capability < 6: - self.warning("NVIDIA Inference is only supported on Pascal and newer architectures") + if verbose: + self.warning("NVIDIA Inference is only supported on Pascal and newer architectures") cuda_okay = False if cuda_capability >= 8: if torch_cuda_major < 11 or sys_cuda_major < 11: - self.warning("On Ampere and higher architectures please use CUDA 
11+") + if verbose: + self.warning("On Ampere and higher architectures please use CUDA 11+") cuda_okay = False return super().is_compatible(verbose) and cuda_okay diff --git a/pre-commit-toggle.sh b/pre-commit-toggle.sh new file mode 100755 index 000000000000..c458c7f2d0da --- /dev/null +++ b/pre-commit-toggle.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +# Path to the pre-commit configuration file +PRE_COMMIT_CONFIG=".pre-commit-config.yaml" + +# Install pre-commit +pip install -r ${DEEPSPEED_FORK_ROOT}/requirements/requirements-dev.txt + +# Enable pre-commit +function enable_pre_commit() { + # Install pre-commit hooks + pre-commit install +} + +# Disable pre-commit +function disable_pre_commit() { + # Install pre-commit hooks + pre-commit uninstall +} + +# Check if the pre-commit configuration file exists +if [ -f "$PRE_COMMIT_CONFIG" ]; then + echo "Pre-commit configuration file found: $PRE_COMMIT_CONFIG" +else + echo "Pre-commit configuration file not found: $PRE_COMMIT_CONFIG" + exit 1 +fi + +# Check the command-line argument to enable or disable pre-commit +if [ "$1" == "enable" ]; then + enable_pre_commit +elif [ "$1" == "disable" ]; then + disable_pre_commit +else + echo "Usage: ./pre-commit-toggle.sh [enable|disable]" + exit 1 +fi diff --git a/requirements/requirements-dev.txt b/requirements/requirements-dev.txt index c0fc5dba9d33..d323b8838cbf 100644 --- a/requirements/requirements-dev.txt +++ b/requirements/requirements-dev.txt @@ -7,7 +7,7 @@ future importlib-metadata>=4 mup pre-commit>=2.20.0 -pytest>=7.2.0 +pytest==7.1.2 pytest-forked pytest-randomly pytest-xdist diff --git a/requirements/requirements-sparse_attn.txt b/requirements/requirements-sparse_attn.txt index f929bb0168a5..09386fdcb120 100755 --- a/requirements/requirements-sparse_attn.txt +++ b/requirements/requirements-sparse_attn.txt @@ -1 +1 @@ -triton==1.0.0 +triton==2.0.0.dev20221202 diff --git a/tests/conftest.py b/tests/conftest.py index 45e8434a021b..4e5737724e32 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -11,6 +11,14 @@ from os.path import abspath, dirname, join import torch import warnings +from unit.ci_promote_marker import * +from unit.xfail_marker import * +from unit.skip_marker import * +from unit.compile_marker import * +from unit.a100_marker import * +from unit.util import get_hpu_dev_version +from deepspeed.accelerator import get_accelerator +from unit.util import hpu_lazy_enabled # Set this environment variable for the T5 inference unittest(s) (e.g. google/t5-v1_1-small) os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'python' @@ -70,6 +78,100 @@ def pytest_runtest_call(item): item.runtest = lambda: True # Dummy function so test is not run twice +def pytest_collection_modifyitems(items, config): + device = get_accelerator().device_name() + gaudi_dev = get_hpu_dev_version() + hpu_lazy_mode = hpu_lazy_enabled() + # Add comipile, CI and Promote marker + marker_expression = config.getoption("-m") + # This is to handle the case where marker is already present and compile marker is added. 
to avoid running of compile tests in other markers when not specified + if marker_expression not in ["compile_1c", "compile_4c"]: + deselected = [] + remaining_items = [] + for item in items: + if item._nodeid in compile_tests_4c or item._nodeid in compile_tests_1c: + deselected.append(item) + continue + remaining_items.append(item) + items[:] = remaining_items # Only tests with 'compile_mode' False remain + config.hook.pytest_deselected(items=deselected) + for item in items: + if item._nodeid in compile_tests_4c: + item._pyfuncitem.add_marker(pytest.mark.compile_4c) + if item._nodeid in compile_tests_1c: + item._pyfuncitem.add_marker(pytest.mark.compile_1c) + if device != 'hpu': + if item._nodeid in a100_tests: + item._pyfuncitem.add_marker(pytest.mark.a100) + if item._nodeid in hpu_ci_tests: + item._pyfuncitem.add_marker(pytest.mark.hpu_ci) + if item._nodeid in hpu_ci_tests_4cards: + item._pyfuncitem.add_marker(pytest.mark.hpu_ci_4cards) + if item._nodeid in gpu_ci_tests: + item._pyfuncitem.add_marker(pytest.mark.gpu_ci) + if item._nodeid in hpu_promote_tests: + item._pyfuncitem.add_marker(pytest.mark.hpu_promote) + if item._nodeid in hpu_promote_tests_4cards: + item._pyfuncitem.add_marker(pytest.mark.hpu_promote_4cards) + if item._nodeid in gpu_promote_tests: + item._pyfuncitem.add_marker(pytest.mark.gpu_promote) + + # Add xfail and SKIP marker + item.user_properties.append(("module_name", item.module.__name__)) + if device == 'hpu': + # Lazy Run + if hpu_lazy_mode: + if item._nodeid in hpu_lazy_xfail_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.xfail(reason=hpu_lazy_xfail_tests[item._nodeid])) + if item._nodeid in hpu_lazy_skip_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.skipif(reason=hpu_lazy_skip_tests[item._nodeid])) + if gaudi_dev == "Gaudi": + if item._nodeid in g1_lazy_xfail_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.xfail(reason=g1_lazy_xfail_tests[item._nodeid])) + if item._nodeid in g1_lazy_skip_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.skip(reason=g1_lazy_skip_tests[item._nodeid])) + if gaudi_dev == "Gaudi2": + if item._nodeid in g2_lazy_xfail_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.xfail(reason=g2_lazy_xfail_tests[item._nodeid])) + if item._nodeid in g2_lazy_skip_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.skip(reason=g2_lazy_skip_tests[item._nodeid])) + if gaudi_dev == "Gaudi3": + if item._nodeid in g3_lazy_xfail_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.xfail(reason=g3_lazy_xfail_tests[item._nodeid])) + if item._nodeid in g3_lazy_skip_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.skip(reason=g3_lazy_skip_tests[item._nodeid])) + # Eager Run + else: + if item._nodeid in hpu_eager_xfail_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.xfail(reason=hpu_eager_xfail_tests[item._nodeid])) + if item._nodeid in hpu_eager_skip_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.skipif(reason=hpu_eager_skip_tests[item._nodeid])) + if gaudi_dev == "Gaudi": + if item._nodeid in g1_eager_xfail_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.xfail(reason=g1_eager_xfail_tests[item._nodeid])) + if item._nodeid in g1_eager_skip_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.skip(reason=g1_eager_skip_tests[item._nodeid])) + if gaudi_dev == "Gaudi2": + if item._nodeid in g2_eager_xfail_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.xfail(reason=g2_eager_xfail_tests[item._nodeid])) + if item._nodeid in g2_eager_skip_tests.keys(): + 
item._pyfuncitem.add_marker(pytest.mark.skip(reason=g2_eager_skip_tests[item._nodeid])) + if gaudi_dev == "Gaudi3": + if item._nodeid in g3_eager_xfail_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.xfail(reason=g3_eager_xfail_tests[item._nodeid])) + if item._nodeid in g3_eager_skip_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.skip(reason=g3_eager_skip_tests[item._nodeid])) + else: + if item._nodeid in gpu_xfail_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.xfail(reason=gpu_xfail_tests[item._nodeid])) + if item._nodeid in gpu_skip_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.skipif(reason=gpu_skip_tests[item._nodeid])) + for marker in item.own_markers: + if marker.name in ['skip', 'xfail']: + if 'reason' in marker.kwargs: + item.user_properties.append(("message", marker.kwargs['reason'])) + + # We allow DistributedTest to reuse distributed environments. When the last # test for a class is run, we want to make sure those distributed environments # are destroyed. @@ -85,3 +187,11 @@ def pytest_fixture_setup(fixturedef, request): if getattr(fixturedef.func, "is_dist_fixture", False): dist_fixture_class = fixturedef.func() dist_fixture_class(request) + + +def pytest_runtest_makereport(item, call): + if call.when == 'call': + if call.excinfo: + if not (any('message' in prop for prop in item.user_properties)): + if call.excinfo.value: + item.user_properties.append(("message", call.excinfo.value)) diff --git a/tests/pytest.ini b/tests/pytest.ini index f841c47afc0c..bbd4a555bcd3 100644 --- a/tests/pytest.ini +++ b/tests/pytest.ini @@ -1,5 +1,5 @@ [pytest] -addopts = -m "not sequential and not nightly and not inference and not seq_inference and not inference_ops and not inference_v2 and not inference_v2_ops and not stable_diffusion and not evaluation" +addopts = -m "not sequential and not nightly and not inference and not seq_inference and not inference_ops and not inference_v2 and not inference_v2_ops and not stable_diffusion and not evaluation and not compile_4c and not compile_1c and not a100" markers = sequential:Tests that need to be run sequentially inference:Inference model tests @@ -11,3 +11,11 @@ markers = world_size:Change world size of individual tests in a class stable_diffusion:Tests that run Stable Diffusion evaluation:Tests that evaluate model correctness + compile: torch.compile tests + hpu_ci: HPU CI tests + hpu_ci_4cards: HPU CI tests on 4 cards + hpu_promote: HPU Promote tests + hpu_promote_4cards: HPU Promote tests on 4 cards + gpu_ci: GPU CI tests + gpu_promote: GPU Promote tests + a100: Tests unsupported on Titan-XP, to be run on A100 diff --git a/tests/unit/a100_marker.py b/tests/unit/a100_marker.py new file mode 100644 index 000000000000..0431f8987d06 --- /dev/null +++ b/tests/unit/a100_marker.py @@ -0,0 +1,84 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +a100_tests = [ + "unit/runtime/half_precision/test_bf16.py::TestZero2ReduceScatterOff::test", + "unit/runtime/half_precision/test_bf16.py::TestZeroSupportedClientOptimizer::test[FusedAdam]", + "unit/runtime/half_precision/test_bf16.py::TestZeroSupportedClientOptimizer::test[Adam]", + "unit/runtime/half_precision/test_bf16.py::TestZeroAllowUntestedOptimizer::test", + "unit/runtime/half_precision/test_bf16.py::TestZeroEmptyPartition::test", + "unit/runtime/half_precision/test_bf16.py::TestZeroDtypeCocktail::test[bfp16-bfp16]", + "unit/runtime/half_precision/test_bf16.py::TestZeroDtypeCocktail::test[bfp16-fp32]", + "unit/runtime/half_precision/test_bf16.py::TestZeroDtypeCocktail::test[fp16-bfp16]", + "unit/runtime/half_precision/test_bf16.py::TestZeroDtypeCocktail::test[bfp16-fp16]", + "unit/runtime/half_precision/test_bf16.py::TestZeroDtypeCocktail::test[default-bfp16]", + "unit/runtime/half_precision/test_bf16.py::TestZeroEmptyGrad::test", + "unit/runtime/half_precision/test_bf16.py::TestAdamBF16ZeroOneCycleCompatibility::test", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-2-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-2-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-2-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-1-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-1-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-3-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-1-dtype0]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[bf16-bf16-amp]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp16-bf16-amp]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[None-bf16-zero1]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp16-bf16-zero2]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[None-bf16-None]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[bf16-bf16-None]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[bf16-bf16-zero2]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp32-bf16-zero1]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[None-bf16-zero2]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp32-bf16-zero3]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[None-bf16-amp]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp32-bf16-None]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp32-bf16-amp]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp16-bf16-zero1]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp16-bf16-None]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[None-bf16-zero3]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[bf16-bf16-zero3]", + 
"unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp16-bf16-zero3]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[bf16-bf16-zero1]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp32-bf16-zero2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-False-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-False-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-False-1-dtype0]", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_bf16_fragments[False]", + "unit/runtime/sparse_tensor/test_averaging_sparse_gradients.py::TestSparseAdam::test", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_base[topo_config0]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config2]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_base[topo_config2]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config1]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config0]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_base[topo_config1]", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[1-False]", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[0-False]", + "unit/runtime/zero/test_nvme_checkpointing.py::TestNVMeCheckpointing::test_nvme_checkpointing[nvme-nvme]", + "unit/runtime/zero/test_nvme_checkpointing.py::TestNVMeCheckpointing::test_nvme_checkpointing[cpu-nvme]", + "unit/runtime/zero/test_nvme_checkpointing.py::TestNVMeCheckpointing::test_nvme_checkpointing[cpu-cpu]", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[nvme-3-full-dtype0]", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[nvme-3-local-dtype1]", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[nvme-3-local-dtype2]", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[nvme-3-full-dtype2]", + 
"unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[nvme-3-full-dtype1]", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[nvme-3-local-dtype0]", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[nvme-3-local-False]", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[nvme-3-local-True]", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[nvme-3-full-False]", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[nvme-3-full-True]", +] diff --git a/tests/unit/alexnet_model.py b/tests/unit/alexnet_model.py index 25256d376eeb..94c818b69192 100644 --- a/tests/unit/alexnet_model.py +++ b/tests/unit/alexnet_model.py @@ -100,12 +100,24 @@ def cifar_trainset(fp16=False): dist.barrier() if local_rank != 0: dist.barrier() - - data_root = os.getenv("TEST_DATA_DIR", "/tmp/") - trainset = torchvision.datasets.CIFAR10(root=os.path.join(data_root, "cifar10-data"), - train=True, - download=True, - transform=transform) + if os.getenv("CIFAR10_OFFLINE", default=None): + if os.getenv("CIFAR10_DATASET_PATH", default=None): + trainset = torchvision.datasets.CIFAR10(root=os.getenv("CIFAR10_DATASET_PATH", default=None), + train=True, + download=False, + transform=transform) + elif os.getenv("STORE_CIFAR10", default=None): + if os.getenv("CIFAR10_DATASET_PATH", default=None): + trainset = torchvision.datasets.CIFAR10(root=os.getenv("CIFAR10_DATASET_PATH", default=None), + train=True, + download=True, + transform=transform) + else: + data_root = os.getenv("TEST_DATA_DIR", "/tmp/") + trainset = torchvision.datasets.CIFAR10(root=os.path.join(data_root, "cifar10-data"), + train=True, + download=True, + transform=transform) if local_rank == 0: dist.barrier() return trainset diff --git a/tests/unit/checkpoint/common.py b/tests/unit/checkpoint/common.py index 3fb13b214ea0..957dd54fd826 100644 --- a/tests/unit/checkpoint/common.py +++ b/tests/unit/checkpoint/common.py @@ -17,6 +17,7 @@ from unit.common import preferred_dtype from unit.simple_model import * from unittest.mock import MagicMock, patch +from unit.util import hpu_lazy_enabled def compare_deepspeed_states(saved_model, loaded_model): @@ -155,6 +156,8 @@ def create_moe_param_groups(model): def create_deepspeed_model(config_dict, model, base_optimizer): + if hpu_lazy_enabled(): + model.to(get_accelerator().device_name()) ds_model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=create_moe_param_groups(model), @@ -174,11 +177,14 @@ def checkpoint_correctness_verification(config_dict, empty_tag=False, seq_dataloader=False, load_module_only=False, - dtype=None): + dtype=None, + compile_mode=False): if dtype == None: dtype = preferred_dtype() ds_model = create_deepspeed_model(config_dict=config_dict, model=models[0], base_optimizer=base_optimizers[0]) + if compile_mode: + ds_model.compile() if seq_dataloader: data_loader = sequence_dataloader(model=ds_model, diff --git a/tests/unit/checkpoint/test_latest_checkpoint.py b/tests/unit/checkpoint/test_latest_checkpoint.py index 5d795c4dadcf..cf9d6976d712 100644 --- a/tests/unit/checkpoint/test_latest_checkpoint.py +++ b/tests/unit/checkpoint/test_latest_checkpoint.py @@ -19,7 +19,8 @@ class TestLatestCheckpoint(DistributedTest): world_size = 1 - def test_existing_latest(self, tmpdir): + @pytest.mark.parametrize('compile_mode', [True, 
False]) + def test_existing_latest(self, tmpdir, compile_mode): config_dict = { "train_batch_size": 2, "steps_per_print": 1, @@ -39,9 +40,11 @@ def test_existing_latest(self, tmpdir): load_optimizer_states=True, load_lr_scheduler_states=False, empty_tag=True, - dtype=torch.float) + dtype=torch.float, + compile_mode=compile_mode) - def test_missing_latest(self, tmpdir): + @pytest.mark.parametrize('compile_mode', [True, False]) + def test_missing_latest(self, tmpdir, compile_mode): config_dict = { "train_batch_size": 2, "steps_per_print": 1, @@ -55,5 +58,7 @@ def test_missing_latest(self, tmpdir): hidden_dim = 10 model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) + if compile_mode: + model.compile() # should be no-op, since latest doesn't exist model.load_checkpoint(tmpdir) diff --git a/tests/unit/checkpoint/test_lr_scheduler.py b/tests/unit/checkpoint/test_lr_scheduler.py index 89c4dd1b49f7..b7b64656c543 100644 --- a/tests/unit/checkpoint/test_lr_scheduler.py +++ b/tests/unit/checkpoint/test_lr_scheduler.py @@ -15,12 +15,13 @@ import pytest +@pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize('zero_stage, use_cpu_offload', [(0, False), (1, False), (2, False), (2, True), (3, False), (3, True)]) class TestLRSchedulerCheckpoint(DistributedTest): world_size = 2 - def test_checkpoint_lr_scheduler(self, tmpdir, zero_stage, use_cpu_offload): + def test_checkpoint_lr_scheduler(self, tmpdir, zero_stage, use_cpu_offload, compile_mode): if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]: pytest.skip("cpu-adam is not compatible") if get_accelerator().device_name() == 'cpu': @@ -70,9 +71,10 @@ def test_checkpoint_lr_scheduler(self, tmpdir, zero_stage, use_cpu_offload): hidden_dim, tmpdir, load_optimizer_states=False, - load_lr_scheduler_states=True) + load_lr_scheduler_states=True, + compile_mode=compile_mode) - def test_checkpoint_no_lr_scheduler(self, tmpdir, zero_stage, use_cpu_offload): + def test_checkpoint_no_lr_scheduler(self, tmpdir, zero_stage, use_cpu_offload, compile_mode): if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]: pytest.skip("cpu-adam is not compatible") if get_accelerator().device_name() == 'cpu': @@ -117,4 +119,5 @@ def test_checkpoint_no_lr_scheduler(self, tmpdir, zero_stage, use_cpu_offload): hidden_dim, tmpdir, load_optimizer_states=False, - load_lr_scheduler_states=False) + load_lr_scheduler_states=False, + compile_mode=compile_mode) diff --git a/tests/unit/checkpoint/test_moe_checkpoint.py b/tests/unit/checkpoint/test_moe_checkpoint.py index 89878b5d8fa9..3f85d64d8a52 100644 --- a/tests/unit/checkpoint/test_moe_checkpoint.py +++ b/tests/unit/checkpoint/test_moe_checkpoint.py @@ -10,6 +10,8 @@ from unit.simple_model import * from unit.checkpoint.common import checkpoint_correctness_verification +from unit.util import hpu_lazy_enabled +from deepspeed.accelerator import get_accelerator import pytest @@ -38,8 +40,9 @@ def test_checkpoint_moe(self, tmpdir, ep_size): seq_dataloader=True, dtype=torch.float16) + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize("ep_size, load_optim_states", [(4, True), (4, False), (2, True), (2, False)]) - def test_checkpoint_moe_and_zero(self, tmpdir, ep_size, load_optim_states): + def test_checkpoint_moe_and_zero(self, tmpdir, ep_size, load_optim_states, compile_mode): if not required_torch_version(min_version=1.8): pytest.skip("DeepSpeed 
MoE tests need torch 1.8 or higher to run correctly") @@ -66,6 +69,9 @@ def test_checkpoint_moe_and_zero(self, tmpdir, ep_size, load_optim_states): hidden_dim = 16 models = [SimpleMoEModel(hidden_dim=hidden_dim, num_experts=ep_size, ep_size=ep_size) for _ in range(2)] + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + models = [model.to(device) for model in models] # param group must have a random unique name (for now) # TODO: clean-up this requirement, the unique name should not be required here param_groups = [{'params': [p for p in model.parameters()], 'name': 'random-unique-name'} for model in models] @@ -80,4 +86,5 @@ def test_checkpoint_moe_and_zero(self, tmpdir, ep_size, load_optim_states): empty_tag=True, base_optimizers=optimizers, seq_dataloader=True, - dtype=torch.float16) + dtype=torch.float16, + compile_mode=compile_mode) diff --git a/tests/unit/checkpoint/test_other_optimizer.py b/tests/unit/checkpoint/test_other_optimizer.py index bcff7f5e3072..7cb8c0603bc8 100644 --- a/tests/unit/checkpoint/test_other_optimizer.py +++ b/tests/unit/checkpoint/test_other_optimizer.py @@ -18,7 +18,8 @@ class TestOtherOptimizerCheckpoint(DistributedTest): world_size = 2 @pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedLambBuilder.NAME], reason="lamb is not compatible") - def test_checkpoint_unfused_optimizer(self, tmpdir): + @pytest.mark.parametrize('compile_mode', [True, False]) + def test_checkpoint_unfused_optimizer(self, tmpdir, compile_mode): #if not get_accelerator().is_fp16_supported(): # pytest.skip("fp16 is not supported") config_dict = { @@ -62,16 +63,19 @@ def test_checkpoint_unfused_optimizer(self, tmpdir): models=models, hidden_dim=hidden_dim, tmpdir=tmpdir, - load_optimizer_states=True) + load_optimizer_states=True, + compile_mode=compile_mode) # Ignore optimizer states checkpoint_correctness_verification(config_dict, models=models, hidden_dim=hidden_dim, tmpdir=tmpdir, - load_optimizer_states=False) + load_optimizer_states=False, + compile_mode=compile_mode) - def test_checkpoint_fused_optimizer(self, tmpdir): + @pytest.mark.parametrize('compile_mode', [True, False]) + def test_checkpoint_fused_optimizer(self, tmpdir, compile_mode): if get_accelerator().device_name() == "cpu": pytest.skip("CPU accelerator does not support this test") config_dict = { @@ -101,16 +105,19 @@ def test_checkpoint_fused_optimizer(self, tmpdir): models=models, hidden_dim=hidden_dim, tmpdir=tmpdir, - load_optimizer_states=True) + load_optimizer_states=True, + compile_mode=compile_mode) # Ignore optimizer states checkpoint_correctness_verification(config_dict, models=models, hidden_dim=hidden_dim, tmpdir=tmpdir, - load_optimizer_states=False) + load_optimizer_states=False, + compile_mode=compile_mode) - def test_checkpoint_fp32_optimizer(self, tmpdir): + @pytest.mark.parametrize('compile_mode', [True, False]) + def test_checkpoint_fp32_optimizer(self, tmpdir, compile_mode): config_dict = { "train_batch_size": 2, "steps_per_print": 1, @@ -135,4 +142,5 @@ def test_checkpoint_fp32_optimizer(self, tmpdir): models=models, hidden_dim=hidden_dim, tmpdir=tmpdir, - dtype=torch.float32) + dtype=torch.float32, + compile_mode=compile_mode) diff --git a/tests/unit/checkpoint/test_pipeline.py b/tests/unit/checkpoint/test_pipeline.py index c6c228ccada7..c90f5dbe1cf6 100644 --- a/tests/unit/checkpoint/test_pipeline.py +++ b/tests/unit/checkpoint/test_pipeline.py @@ -15,8 +15,9 @@ class TestPipelineCheckpoint(DistributedTest): world_size = 4 + @pytest.mark.parametrize('compile_mode', [True, 
False]) @pytest.mark.parametrize("zero_stage", [0, 1]) - def test_checkpoint_pipe_engine(self, zero_stage, tmpdir): + def test_checkpoint_pipe_engine(self, zero_stage, tmpdir, compile_mode): skip_on_arch(min_arch=7) config_dict = { @@ -61,7 +62,8 @@ def test_checkpoint_pipe_engine(self, zero_stage, tmpdir): load_optimizer_states=True, load_lr_scheduler_states=True, train_batch=True, - dtype=torch.float16 if zero_stage > 0 else torch.float32) + dtype=torch.float16 if zero_stage > 0 else torch.float32, + compile_mode=compile_mode) @pytest.mark.parametrize( "base_topo,test_topo", diff --git a/tests/unit/checkpoint/test_shared_weights.py b/tests/unit/checkpoint/test_shared_weights.py index ed69073fb81c..d3e0db81af6f 100644 --- a/tests/unit/checkpoint/test_shared_weights.py +++ b/tests/unit/checkpoint/test_shared_weights.py @@ -7,8 +7,11 @@ import torch.nn as nn import deepspeed +import pytest from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint from unit.common import DistributedTest +from unit.util import hpu_lazy_enabled +from deepspeed.accelerator import get_accelerator class ModelWithSharedWeights(nn.Module): @@ -25,7 +28,8 @@ def __init__(self): class TestCheckpointSharedWeights(DistributedTest): world_size = 2 - def test_checkpoint_shared_weights(self, tmp_path): + @pytest.mark.parametrize('compile_mode', [True, False]) + def test_checkpoint_shared_weights(self, tmp_path, compile_mode): config = { "train_micro_batch_size_per_gpu": 2, "zero_allow_untested_optimizer": True, @@ -34,6 +38,9 @@ def test_checkpoint_shared_weights(self, tmp_path): }, } model = ModelWithSharedWeights() + if hpu_lazy_enabled(): + device = get_accelerator().current_device_name() + model.to(device) optimizer = torch.optim.Adam(model.parameters()) deepspeed_engine, _, _, _ = deepspeed.initialize( @@ -41,6 +48,9 @@ def test_checkpoint_shared_weights(self, tmp_path): model=model, optimizer=optimizer, ) + if compile_mode: + deepspeed_engine.compile() + filename = tmp_path / "checkpoint.pt" deepspeed_engine.save_checkpoint(filename, tag="checkpoint") diff --git a/tests/unit/checkpoint/test_sparse.py b/tests/unit/checkpoint/test_sparse.py index 19fbcd81e473..e2f0e1dc079b 100644 --- a/tests/unit/checkpoint/test_sparse.py +++ b/tests/unit/checkpoint/test_sparse.py @@ -24,8 +24,9 @@ class TestSparseCheckpoint(DistributedTest): [True, False], [True, True], ]) + @pytest.mark.parametrize('compile_mode', [True, False]) def test_non_strict_load_sparse(self, tmpdir, to_save_model_has_embedding, to_save_model_sparse, - destination_has_embedding, destination_sparse): + destination_has_embedding, destination_sparse, compile_mode): class ModelNoEmbedding(torch.nn.Module): @@ -66,6 +67,10 @@ def forward(self, x, offsets): "sparse_gradients": destination_sparse }) + if compile_mode: + engine_to_save.compile() + engine_destination.compile() + save_folder = os.path.join(tmpdir, 'saved_checkpoint') save_tag = '1' diff --git a/tests/unit/checkpoint/test_tag_validation.py b/tests/unit/checkpoint/test_tag_validation.py index b164c31e52b0..edbc42dcadf4 100644 --- a/tests/unit/checkpoint/test_tag_validation.py +++ b/tests/unit/checkpoint/test_tag_validation.py @@ -14,8 +14,9 @@ class TestCheckpointValidationTag(DistributedTest): world_size = 2 + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize('valid_mode', ["FAIL", "WARN", "IGNORE"]) - def test_checkpoint_unique_tag(self, tmpdir, valid_mode): + def test_checkpoint_unique_tag(self, tmpdir, valid_mode, compile_mode): config_dict = { 
"train_batch_size": 2, "steps_per_print": 1, @@ -33,13 +34,16 @@ def test_checkpoint_unique_tag(self, tmpdir, valid_mode): model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) + if compile_mode: + model.compile() if valid_mode == "FAIL": with pytest.raises(AssertionError): model.save_checkpoint(save_dir=tmpdir, tag=f"tag-{dist.get_rank()}") else: model.save_checkpoint(save_dir=tmpdir, tag=f"tag-{dist.get_rank()}") - def test_checkpoint_unknown_tag_validation(self, tmpdir): + @pytest.mark.parametrize('compile_mode', [True, False]) + def test_checkpoint_unknown_tag_validation(self, tmpdir, compile_mode): config_dict = { "train_batch_size": 2, @@ -60,3 +64,5 @@ def test_checkpoint_unknown_tag_validation(self, tmpdir): with pytest.raises(deepspeed.DeepSpeedConfigError): model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) + if compile_mode: + model.compile() diff --git a/tests/unit/checkpoint/test_universal_checkpoint.py b/tests/unit/checkpoint/test_universal_checkpoint.py index e0c4f4745043..a6b7a402e559 100644 --- a/tests/unit/checkpoint/test_universal_checkpoint.py +++ b/tests/unit/checkpoint/test_universal_checkpoint.py @@ -13,9 +13,10 @@ from unit.common import DistributedTest, DistributedFixture from unit.simple_model import * -from unit.util import bf16_required_version_check +from unit.util import bf16_required_version_check, hpu_lazy_enabled from unit.checkpoint.common import compare_opt_state_dicts, compare_state_dicts +from deepspeed.accelerator import get_accelerator import pytest import deepspeed.comm as dist @@ -79,6 +80,9 @@ def train_save_convert(ds_config, hidden_dim, load_optim, use_torch_adam, dtype, test_step = 8 model = SimpleModel(hidden_dim) + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + model = model.to(device) model = init_ds_engine(model, ds_config, use_torch_adam) data_loader = random_dataloader(model=model, total_samples=test_step, @@ -162,9 +166,10 @@ class baseline_ws4(_baseline): @pytest.mark.parametrize("zero_stage", [1]) @pytest.mark.parametrize("use_torch_adam", [False, True]) @pytest.mark.parametrize("load_optim", [False, True]) +@pytest.mark.parametrize('compile_mode', [True, False]) class TestZeROUniversalCheckpointDP(DistributedTest): - def _run_test(self, tmpdir, dtype, ds_config, load_optim, use_torch_adam): + def _run_test(self, tmpdir, dtype, ds_config, load_optim, use_torch_adam, compile_mode): if dtype == torch.bfloat16 and not bf16_required_version_check(): pytest.skip( " DeepSpeed BFloat16 tests need torch >= 1.10, NCCL >= 2.10.3, CUDA > =11.0 and HW support for BFloat16 to run correctly" @@ -175,7 +180,14 @@ def _run_test(self, tmpdir, dtype, ds_config, load_optim, use_torch_adam): ds_config["checkpoint"] = {"load_universal": True} univ_model = SimpleModel(hidden_dim) + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + univ_model = univ_model.to(device) + univ_model = init_ds_engine(univ_model, ds_config, use_torch_adam) + if compile_mode: + univ_model.compile() + univ_model.load_checkpoint(tmpdir, tag=f"{CP_TAG}_universal", load_optimizer_states=load_optim) model_state = univ_model.state_dict() @@ -203,13 +215,16 @@ def _run_test(self, tmpdir, dtype, ds_config, load_optim, use_torch_adam): univ_model.step() @pytest.mark.world_size(2) - def test_dp_world_size_2to2(self, baseline_ws2, tmpdir, dtype, ds_config, load_optim, use_torch_adam): - self._run_test(tmpdir, 
dtype, ds_config, load_optim, use_torch_adam) + def test_dp_world_size_2to2(self, baseline_ws2, tmpdir, dtype, ds_config, load_optim, use_torch_adam, + compile_mode): + self._run_test(tmpdir, dtype, ds_config, load_optim, use_torch_adam, compile_mode) @pytest.mark.world_size(2) - def test_dp_world_size_4to2(self, baseline_ws4, tmpdir, dtype, ds_config, load_optim, use_torch_adam): - self._run_test(tmpdir, dtype, ds_config, load_optim, use_torch_adam) + def test_dp_world_size_4to2(self, baseline_ws4, tmpdir, dtype, ds_config, load_optim, use_torch_adam, + compile_mode): + self._run_test(tmpdir, dtype, ds_config, load_optim, use_torch_adam, compile_mode) @pytest.mark.world_size(4) - def test_dp_world_size_2to4(self, baseline_ws2, tmpdir, dtype, ds_config, load_optim, use_torch_adam): - self._run_test(tmpdir, dtype, ds_config, load_optim, use_torch_adam) + def test_dp_world_size_2to4(self, baseline_ws2, tmpdir, dtype, ds_config, load_optim, use_torch_adam, + compile_mode): + self._run_test(tmpdir, dtype, ds_config, load_optim, use_torch_adam, compile_mode) diff --git a/tests/unit/checkpoint/test_zero_optimizer.py b/tests/unit/checkpoint/test_zero_optimizer.py index 84b4eca6e2ca..1e5587a02078 100644 --- a/tests/unit/checkpoint/test_zero_optimizer.py +++ b/tests/unit/checkpoint/test_zero_optimizer.py @@ -12,6 +12,7 @@ from unit.common import DistributedTest, DistributedFixture from unit.simple_model import * +from unit.util import hpu_lazy_enabled from unit.checkpoint.common import * @@ -21,8 +22,9 @@ class TestZeROCheckpoint(DistributedTest): world_size = 2 + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize('zero_stage', [3]) - def test_pipeline_checkpoint_loading(self, tmpdir, zero_stage): + def test_pipeline_checkpoint_loading(self, tmpdir, zero_stage, compile_mode): config_dict = { "train_batch_size": 2, "optimizer": { @@ -41,14 +43,19 @@ def test_pipeline_checkpoint_loading(self, tmpdir, zero_stage): with deepspeed.zero.Init(): models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] + checkpoint_correctness_verification(config_dict, + models, + hidden_dim, + tmpdir, + load_module_only=True, + compile_mode=compile_mode) - checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_module_only=True) - + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize('zero_stage, use_cpu_offload, adam_optimizer', [(1, False, 'Adam'), (2, False, 'Adam'), (2, True, 'deepspeed_adam'), (3, False, 'Adam'), (3, True, 'deepspeed_adam')]) - def test_load_optimizer_state(self, tmpdir, zero_stage, use_cpu_offload, adam_optimizer): + def test_load_optimizer_state(self, tmpdir, zero_stage, use_cpu_offload, adam_optimizer, compile_mode): if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]: pytest.skip("cpu-adam is not compatible") @@ -81,14 +88,23 @@ def test_load_optimizer_state(self, tmpdir, zero_stage, use_cpu_offload, adam_op models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] else: models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + models = [model.to(device) for model in models] - checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_optimizer_states=True) + checkpoint_correctness_verification(config_dict, + models, + hidden_dim, + tmpdir, + load_optimizer_states=True, + compile_mode=compile_mode) + @pytest.mark.parametrize('compile_mode', [True, 
False]) @pytest.mark.parametrize('zero_stage, use_cpu_offload, adam_optimizer', [(1, False, "Adam"), (2, False, "Adam"), (2, True, 'deepspeed_adam'), (3, False, 'Adam'), (3, True, 'deepspeed_adam')]) - def test_not_load_optimizer_state(self, tmpdir, zero_stage, use_cpu_offload, adam_optimizer): + def test_not_load_optimizer_state(self, tmpdir, zero_stage, use_cpu_offload, adam_optimizer, compile_mode): if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]: pytest.skip("cpu-adam is not compatible") @@ -122,11 +138,20 @@ def test_not_load_optimizer_state(self, tmpdir, zero_stage, use_cpu_offload, ada models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] else: models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + models = [model.to(device) for model in models] - checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_optimizer_states=False) + checkpoint_correctness_verification(config_dict, + models, + hidden_dim, + tmpdir, + load_optimizer_states=False, + compile_mode=compile_mode) + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize('zero_stage', [1, 2]) - def test_hybrid_optimizer_state(self, tmpdir, zero_stage): + def test_hybrid_optimizer_state(self, tmpdir, zero_stage, compile_mode): config_dict = { "train_micro_batch_size_per_gpu": 2, "gradient_accumulation_steps": 2, @@ -142,6 +167,9 @@ def test_hybrid_optimizer_state(self, tmpdir, zero_stage): config_dict["bf16"] = {"enabled": True} hidden_dim = 10 models = [SimpleModel(hidden_dim=hidden_dim) for _ in range(2)] + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + models = [model.to(device) for model in models] optimizers = [HybridStateOptimizer(model.parameters()) for model in models] checkpoint_correctness_verification(config_dict, @@ -149,10 +177,12 @@ def test_hybrid_optimizer_state(self, tmpdir, zero_stage): base_optimizers=optimizers, hidden_dim=hidden_dim, tmpdir=tmpdir, - load_optimizer_states=True) + load_optimizer_states=True, + compile_mode=compile_mode) + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize('zero_stage', [0, 1, 2, 3]) - def test_load_module_only(self, tmpdir, zero_stage): + def test_load_module_only(self, tmpdir, zero_stage, compile_mode): if zero_stage == 0 and get_accelerator().device_name() == "cpu": pytest.skip("CPU Accelerator does not support this test") config_dict = { @@ -175,8 +205,16 @@ def test_load_module_only(self, tmpdir, zero_stage): models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] else: models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + models = [model.to(device) for model in models] - checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_module_only=True) + checkpoint_correctness_verification(config_dict, + models, + hidden_dim, + tmpdir, + load_module_only=True, + compile_mode=compile_mode) class ws4_model_checkpoint(DistributedFixture): @@ -212,13 +250,14 @@ def run(self, class_tmpdir, elastic_save, load_optim): model.save_checkpoint(class_tmpdir) +@pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize("elastic_save", [True, False]) @pytest.mark.parametrize("elastic_load", [True, False]) @pytest.mark.parametrize("load_optim", [True, False]) class TestZeROElasticCheckpoint(DistributedTest): 
world_size = 2 - def test_elastic_checkpoint_fixed_dp(self, tmpdir, elastic_save, elastic_load, load_optim): + def test_elastic_checkpoint_fixed_dp(self, tmpdir, elastic_save, elastic_load, load_optim, compile_mode): ds_config = { "train_batch_size": 2, "optimizer": { @@ -243,6 +282,8 @@ def test_elastic_checkpoint_fixed_dp(self, tmpdir, elastic_save, elastic_load, l model, _, _, _ = deepspeed.initialize(config=ds_config, model=models[0], model_parameters=models[0].parameters()) + if compile_mode: + model.compile() run_steps = 8 data_loader = random_dataloader(model=model, total_samples=run_steps, @@ -261,6 +302,8 @@ def test_elastic_checkpoint_fixed_dp(self, tmpdir, elastic_save, elastic_load, l model, _, _, _ = deepspeed.initialize(config=ds_config, model=models[1], model_parameters=models[1].parameters()) + if compile_mode: + model.compile() model.load_checkpoint(tmpdir, load_optimizer_states=load_optim) if load_optim: @@ -275,7 +318,7 @@ def test_elastic_checkpoint_fixed_dp(self, tmpdir, elastic_save, elastic_load, l model.step() def test_elastic_checkpoint_change_dp(self, ws4_model_checkpoint, class_tmpdir, elastic_save, elastic_load, - load_optim): + load_optim, compile_mode): ds_config = { "train_batch_size": 4, "optimizer": { @@ -295,6 +338,8 @@ def test_elastic_checkpoint_change_dp(self, ws4_model_checkpoint, class_tmpdir, # Load checkpoint with dp world size = 2 model, _, _, _ = deepspeed.initialize(config=ds_config, model=model, model_parameters=model.parameters()) + if compile_mode: + model.compile() if load_optim: with pytest.raises(deepspeed.runtime.zero.utils.ZeRORuntimeException): model.load_checkpoint(class_tmpdir, load_optimizer_states=load_optim) @@ -302,11 +347,12 @@ def test_elastic_checkpoint_change_dp(self, ws4_model_checkpoint, class_tmpdir, model.load_checkpoint(class_tmpdir, load_optimizer_states=load_optim) +@pytest.mark.parametrize('compile_mode', [True, False]) class TestZeROSaveLoadEdgeCase(DistributedTest): world_size = 2 @pytest.mark.parametrize('zero_stage', [0, 1, 2, 3]) - def test_immediate_save_load(self, tmpdir, zero_stage): + def test_immediate_save_load(self, tmpdir, zero_stage, compile_mode): config_dict = { "train_batch_size": 4, "optimizer": { @@ -324,6 +370,8 @@ def test_immediate_save_load(self, tmpdir, zero_stage): model = SimpleModel(hidden_dim) ds_model = create_deepspeed_model(config_dict=config_dict, model=model, base_optimizer=None) + if compile_mode: + ds_model.compile() ds_model.save_checkpoint(tmpdir) ds_model.load_checkpoint(tmpdir, load_optimizer_states=False, @@ -331,7 +379,7 @@ def test_immediate_save_load(self, tmpdir, zero_stage): load_module_only=False) @pytest.mark.parametrize('zero_stage', [0, 1, 2, 3]) - def test_load_immediate_save(self, tmpdir, zero_stage): + def test_load_immediate_save(self, tmpdir, zero_stage, compile_mode): if zero_stage == 0 and get_accelerator().device_name() == "cpu": pytest.skip("CPU Accelerator does not support this test") config_dict = { @@ -352,6 +400,8 @@ def test_load_immediate_save(self, tmpdir, zero_stage): # 1. pretrain a model and save it ds_model = create_deepspeed_model(config_dict=config_dict, model=model, base_optimizer=None) + if compile_mode: + ds_model.compile() data_loader = random_dataloader(model=ds_model, total_samples=1, hidden_dim=hidden_dim, device=ds_model.device) for _, batch in enumerate(data_loader): loss = ds_model(batch[0], batch[1]) @@ -363,6 +413,8 @@ def test_load_immediate_save(self, tmpdir, zero_stage): # 2. 
load and immediately save a model with a fresh ds engine ds_model = create_deepspeed_model(config_dict=config_dict, model=model, base_optimizer=None) + if compile_mode: + ds_model.compile() ds_model.load_checkpoint(tmpdir, load_optimizer_states=False, load_lr_scheduler_states=False, @@ -370,7 +422,7 @@ def test_load_immediate_save(self, tmpdir, zero_stage): ds_model.save_checkpoint(tmpdir) @pytest.mark.parametrize('zero_stage', [0, 1, 2, 3]) - def test_save_before_accum_grad_is_done(self, tmpdir, zero_stage): + def test_save_before_accum_grad_is_done(self, tmpdir, zero_stage, compile_mode): config_dict = { "optimizer": { "type": 'Adam' @@ -395,6 +447,8 @@ def test_save_before_accum_grad_is_done(self, tmpdir, zero_stage): # So we config grad_accum=2 and step only once and save_16bit_model ds_model = create_deepspeed_model(config_dict=config_dict, model=model, base_optimizer=None) + if compile_mode: + ds_model.compile() data_loader = random_dataloader(model=ds_model, total_samples=2, hidden_dim=hidden_dim, device=ds_model.device) batch = next(iter(data_loader)) @@ -411,11 +465,12 @@ def test_save_before_accum_grad_is_done(self, tmpdir, zero_stage): ds_model.save_checkpoint(tmpdir) +@pytest.mark.parametrize('compile_mode', [True, False]) class TestZeROCheckpointFrozenWeights(DistributedTest): world_size = 2 @pytest.mark.parametrize('zero_stage', [1, 2, 3]) - def test_load_optimizer_state(self, tmpdir, zero_stage): + def test_load_optimizer_state(self, tmpdir, zero_stage, compile_mode): config_dict = { "train_batch_size": 2, @@ -442,11 +497,19 @@ def test_load_optimizer_state(self, tmpdir, zero_stage): with deepspeed.zero.Init(enabled=zero_stage == 3): models = [SimpleFrozenModel(hidden_dim, empty_grad=False) for _ in range(2)] + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + models = [model.to(device) for model in models] - checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_optimizer_states=True) + checkpoint_correctness_verification(config_dict, + models, + hidden_dim, + tmpdir, + load_optimizer_states=True, + compile_mode=compile_mode) @pytest.mark.parametrize('zero_stage', [1, 2, 3]) - def test_not_load_optimizer_state(self, tmpdir, zero_stage): + def test_not_load_optimizer_state(self, tmpdir, zero_stage, compile_mode): config_dict = { "train_batch_size": 2, @@ -472,11 +535,19 @@ def test_not_load_optimizer_state(self, tmpdir, zero_stage): with deepspeed.zero.Init(enabled=zero_stage == 3): models = [SimpleFrozenModel(hidden_dim, empty_grad=False) for _ in range(2)] + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + models = [model.to(device) for model in models] - checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_optimizer_states=False) + checkpoint_correctness_verification(config_dict, + models, + hidden_dim, + tmpdir, + load_optimizer_states=False, + compile_mode=compile_mode) @pytest.mark.parametrize('zero_stage', [1, 2, 3]) - def test_load_module_only(self, tmpdir, zero_stage): + def test_load_module_only(self, tmpdir, zero_stage, compile_mode): config_dict = { "train_batch_size": 2, "optimizer": { @@ -494,11 +565,19 @@ def test_load_module_only(self, tmpdir, zero_stage): with deepspeed.zero.Init(enabled=zero_stage == 3): models = [SimpleFrozenModel(hidden_dim, empty_grad=False) for _ in range(2)] + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + models = [model.to(device) for model in models] - checkpoint_correctness_verification(config_dict, models, hidden_dim, 
tmpdir, load_module_only=True) + checkpoint_correctness_verification(config_dict, + models, + hidden_dim, + tmpdir, + load_module_only=True, + compile_mode=compile_mode) @pytest.mark.parametrize('zero_stage', [1, 2]) - def test_save_exclude_frozen_weights(self, tmpdir, zero_stage): + def test_save_exclude_frozen_weights(self, tmpdir, zero_stage, compile_mode): world_size = 1 config_dict = { "train_micro_batch_size_per_gpu": 1, @@ -518,6 +597,8 @@ def test_save_exclude_frozen_weights(self, tmpdir, zero_stage): model = SimpleFrozenModel(hidden_dim, empty_grad=False) ds_engine, _, _, _ = deepspeed.initialize(model=model, model_parameters=model.parameters(), config=config_dict) + if compile_mode: + ds_engine.compile() # Validate backwards-compatibility of including frozen parameters in checkpoint all_ckpt_folder = os.path.join(tmpdir, 'all_params') @@ -546,7 +627,7 @@ def test_save_exclude_frozen_weights(self, tmpdir, zero_stage): assert loaded_trainable_param_names == trainable_param_names @pytest.mark.parametrize('zero_stage', [1, 2]) - def test_save_exclude_custom_frozen_weights(self, tmpdir, zero_stage): + def test_save_exclude_custom_frozen_weights(self, tmpdir, zero_stage, compile_mode): world_size = 1 config_dict = { "train_micro_batch_size_per_gpu": 1, @@ -566,6 +647,8 @@ def test_save_exclude_custom_frozen_weights(self, tmpdir, zero_stage): model = SimpleFrozenModel(hidden_dim, empty_grad=False) ds_engine, _, _, _ = deepspeed.initialize(model=model, model_parameters=model.parameters(), config=config_dict) + if compile_mode: + ds_engine.compile() # Validate custom state_dict model state_dict_bk = model.state_dict @@ -590,9 +673,10 @@ def test_save_exclude_custom_frozen_weights(self, tmpdir, zero_stage): class TestSaveTensorClone(DistributedTest): world_size = 1 + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize('zero_stage', [1, 2]) @pytest.mark.parametrize('use_cpu_device', [True, False]) - def test_save_tensor_clone(self, tmpdir, zero_stage, use_cpu_device): + def test_save_tensor_clone(self, tmpdir, zero_stage, use_cpu_device, compile_mode): ds_config = { "optimizer": { @@ -609,6 +693,8 @@ def test_save_tensor_clone(self, tmpdir, zero_stage, use_cpu_device): ref_model_state_dict = model.state_dict() ds_engine, _, _, _ = deepspeed.initialize(model=model, config_params=ds_config) + if compile_mode: + ds_engine.compile() clone_device = torch.device('cpu') if use_cpu_device else get_accelerator().current_device() clone_state_dict = clone_tensors_for_torch_save(ds_engine.module.state_dict()) compare_state_dicts(ref_model_state_dict, clone_state_dict) @@ -625,8 +711,9 @@ class TestZeRONonDistributed(DistributedTest): world_size = 1 init_distributed = False + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize('zero_stage', [1, 2, 3]) - def test_chmod_exception_handling(self, monkeypatch, zero_stage): + def test_chmod_exception_handling(self, monkeypatch, zero_stage, compile_mode): config_dict = { "optimizer": { @@ -644,6 +731,8 @@ def test_chmod_exception_handling(self, monkeypatch, zero_stage): model=net, model_parameters=net.parameters()) + if compile_mode: + engine.compile() log_called = False def mock_logger_info(message, *args, **kwargs): diff --git a/tests/unit/ci_promote_marker.py b/tests/unit/ci_promote_marker.py new file mode 100644 index 000000000000..6be61eaae0ae --- /dev/null +++ b/tests/unit/ci_promote_marker.py @@ -0,0 +1,612 @@ +# Copyright (c) 2023 Habana Labs, Ltd. 
an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +hpu_ci_tests = [ + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[None]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_train_schedule_singlestage", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_schedule_firststage", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[3]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[1]", + "unit/launcher/test_ds_arguments.py::test_no_ds_arguments", + "unit/launcher/test_ds_arguments.py::test_no_ds_enable_argument", + "unit/runtime/test_ds_config_model.py::test_config_base", + "unit/comm/test_dist.py::TestWorldSizeOverrideDistTest::test_world_size_1", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_211", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_122", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[roberta-large-fill-mask-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-base-multilingual-uncased-fill-mask-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/roberta-base-squad2-question-answering-fp32-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-base-uncased-fill-mask-bf16-CG]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[2-2]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_baddim[33-33]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_baddim[0-0]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[1-1]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[32-32]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[5-5]", + "unit/runtime/half_precision/test_bf16.py::TestZeroAllowUntestedOptimizer::test", + "unit/runtime/test_ds_config_dict.py::TestInitNoOptimizer::test", + "unit/runtime/test_ds_config_dict.py::TestDistInit::test", "unit/launcher/test_run.py::test_parser_local", + "unit/launcher/test_run.py::test_parser_mutual_exclusive", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[WarmupLR-params0]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[OneCycle-params2]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[LRRangeTest-params3]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[WarmupDecayLR-params1]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0.001-100]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[0.001-0.1-0.1-21-21]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0-211]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.1-0-10-0]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[0.001-0.1-0-21-21]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0-210]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0.001-101]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.01-0.001-10-100]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.01-0.001-10-101]", + "unit/runtime/utils/test_partition.py::test_float_balanced", + "unit/runtime/utils/test_partition.py::test_int_balanced", + 
"unit/runtime/utils/test_partition.py::test_easy_balance_uniform", + "unit/runtime/utils/test_partition.py::test_float_midheavy", + "unit/runtime/utils/test_partition.py::test_short_partition_uniform", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings2]", + "unit/autotuning/test_autotuning.py::test_command_line", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings4]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings3]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[None]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings1]", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest", + "unit/runtime/test_pld.py::TestNonPLDModel::test_non_pld_model", + "unit/runtime/zero/test_zero_config.py::test_zero_config_deprecatedfields", + "unit/runtime/zero/test_zero_config.py::test_zero_config_aliasfields", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensor::test_ckpt_non_tensor_output[None]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensor::test_ckpt_non_tensor_input[None]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensorOutputOrdering::test_ckpt_non_tensor_output_ordering[non_tensor_output3]", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[Optimizer]", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[optimizer_type2]", + "unit/elasticity/test_elastic.py::test_proper_mbsz", "unit/runtime/pipe/test_topology.py::test_topology_rank_repr", + "unit/runtime/pipe/test_topology.py::test_topology_2d", "unit/runtime/pipe/test_topology.py::test_primes", + "unit/runtime/sparse_tensor/test_csr.py::test_csr_addition_different", + "unit/utils/test_get_optim_files.py::test_get_optim_files[2]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[12]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[24]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[1]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-base-multilingual-uncased-fill-mask-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/roberta-base-squad2-question-answering-fp32-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[roberta-large-fill-mask-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-base-uncased-fill-mask-bf16-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[gpt2-text-generation-fp16-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[gpt2-text-generation-fp16-noCG]" +] + +hpu_ci_tests_4cards = [ + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[None]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_train_schedule_singlestage", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_schedule_firststage", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[3]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[1]", + "unit/launcher/test_ds_arguments.py::test_no_ds_arguments", + 
"unit/launcher/test_ds_arguments.py::test_no_ds_enable_argument", + "unit/runtime/test_ds_config_model.py::test_config_base", + "unit/comm/test_dist.py::TestWorldSizeOverrideDistTest::test_world_size_1", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_211", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_122", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[roberta-large-fill-mask-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-base-multilingual-uncased-fill-mask-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/roberta-base-squad2-question-answering-fp32-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-base-uncased-fill-mask-bf16-CG]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[2-2]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_baddim[33-33]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_baddim[0-0]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[1-1]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[32-32]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[5-5]", + "unit/runtime/half_precision/test_bf16.py::TestZeroAllowUntestedOptimizer::test", + "unit/runtime/test_ds_config_dict.py::TestInitNoOptimizer::test", + "unit/runtime/test_ds_config_dict.py::TestDistInit::test", "unit/launcher/test_run.py::test_parser_local", + "unit/launcher/test_run.py::test_parser_mutual_exclusive", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[WarmupLR-params0]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[OneCycle-params2]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[LRRangeTest-params3]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[WarmupDecayLR-params1]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0.001-100]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[0.001-0.1-0.1-21-21]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0-211]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.1-0-10-0]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[0.001-0.1-0-21-21]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0-210]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0.001-101]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.01-0.001-10-100]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.01-0.001-10-101]", + "unit/runtime/utils/test_partition.py::test_float_balanced", + "unit/runtime/utils/test_partition.py::test_int_balanced", + "unit/runtime/utils/test_partition.py::test_easy_balance_uniform", + "unit/runtime/utils/test_partition.py::test_float_midheavy", + "unit/runtime/utils/test_partition.py::test_short_partition_uniform", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings2]", + "unit/autotuning/test_autotuning.py::test_command_line", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings4]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings3]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[None]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings1]", + 
"unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest", + "unit/runtime/test_pld.py::TestNonPLDModel::test_non_pld_model", + "unit/runtime/zero/test_zero_config.py::test_zero_config_deprecatedfields", + "unit/runtime/zero/test_zero_config.py::test_zero_config_aliasfields", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensor::test_ckpt_non_tensor_output[None]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensor::test_ckpt_non_tensor_input[None]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensorOutputOrdering::test_ckpt_non_tensor_output_ordering[non_tensor_output3]", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[Optimizer]", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[optimizer_type2]", + "unit/elasticity/test_elastic.py::test_proper_mbsz", "unit/runtime/pipe/test_topology.py::test_topology_rank_repr", + "unit/runtime/pipe/test_topology.py::test_topology_2d", "unit/runtime/pipe/test_topology.py::test_primes", + "unit/runtime/sparse_tensor/test_csr.py::test_csr_addition_different", + "unit/utils/test_get_optim_files.py::test_get_optim_files[2]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[12]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[24]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[1]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-base-multilingual-uncased-fill-mask-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/roberta-base-squad2-question-answering-fp32-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[roberta-large-fill-mask-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-base-uncased-fill-mask-bf16-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[gpt2-text-generation-fp16-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[gpt2-text-generation-fp16-noCG]", + "unit/comm/test_dist.py::TestDistInitNoEnv::test", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_throughput_calculation", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[bf16-bf16-zero2]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[bf16-bf16-zero1]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp32-fp32-zero1]", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[2]", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[3]", + "unit/runtime/zero/test_zero_context.py::TestGatherUpdate::test", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[1]", + "unit/runtime/zero/test_zero_context.py::TestScatterGather::test", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[2]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-False-Adam]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[2]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-False]", + 
"unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[1-False]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[3]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[1-False]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[2]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-True-deepspeed_adam]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config0]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-True]", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-EleutherAI/gpt-neo-125m-zero_stage=2-bsz=1]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_base[topo_config1]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-True-deepspeed_adam]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config2]", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-EleutherAI/gpt-neo-125m-zero_stage=3-bsz=1]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config1]", + "unit/runtime/half_precision/test_fp16.py::TestFP16OptimizerForMoE::test_unfused_gradnorm", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-facebook/opt-350m-zero_stage=3-bsz=1]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[facebook/opt-350m-fp16]", + "unit/linear/test_linear.py::TestLoRALinear::test[2]", + "unit/linear/test_linear.py::TestOptimizedLinear::test[qbit8-bws2]", + "unit/linear/test_quant_param.py::TestQuantParam::test_unsupported_dtypes[dtype0]" +] + +hpu_promote_tests = [ + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-True-False-resulting_optimizer9]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-True-False-resulting_optimizer3]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-True-True-resulting_optimizer13]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-True-False-resulting_optimizer1]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-True-True-resulting_optimizer7]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-True-True-resulting_optimizer5]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-True-True-resulting_optimizer15]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-True-False-resulting_optimizer11]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-False-False-resulting_optimizer2]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-False-False-resulting_optimizer8]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-False-False-resulting_optimizer0]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-False-True-resulting_optimizer14]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-False-True-resulting_optimizer12]", + 
"unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-False-True-resulting_optimizer4]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-False-True-resulting_optimizer6]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-False-False-resulting_optimizer10]", + "unit/runtime/half_precision/test_bf16.py::TestZeroSupportedClientOptimizer::test[Adam]", + "unit/runtime/half_precision/test_bf16.py::TestZeroSupportedClientOptimizer::test[FusedAdam]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-1]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-1]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-2]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-3]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-2]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-3]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[2]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[1]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[3]", + "unit/compression/test_compression.py::TestCompression::test_mpu_compress", + "unit/launcher/test_run.py::test_parser_errors", "unit/launcher/test_run.py::test_num_plus_parser", + "unit/launcher/test_run.py::test_parser_multinode", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[linear-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[linear-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-15]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[linear-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[linear-15]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-15]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-15]", + "unit/runtime/pipe/test_topology.py::test_topology_3d", + "unit/runtime/pipe/test_topology.py::test_topology_comm_list", + "unit/runtime/test_ds_config_dict.py::test_get_bfloat16_enabled[bfloat16]", + "unit/runtime/test_ds_config_dict.py::test_get_bfloat16_enabled[bf16]", + "unit/runtime/test_ds_config_dict.py::TestNoModel::test", + "unit/runtime/test_ds_config_dict.py::TestDeprecatedDeepScaleConfig::test", + 
"unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs3[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs2[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs3[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs1_outputs1[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs1[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs1_outputs1[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs1[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_arg_none[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_arg_none[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs2[mask1]", + "unit/launcher/test_ds_arguments.py::test_core_deepscale_arguments", + "unit/launcher/test_ds_arguments.py::test_no_ds_arguments_no_ds_parser", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_missing_latest", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-None]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-None]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-scheduler_type2]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-scheduler_type2]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-None]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-scheduler_type2]", + "unit/runtime/utils/test_partition.py::test_balance_bert", + "unit/runtime/zero/test_zero_config.py::test_zero_config_offload_configs", + "unit/runtime/zero/test_zero_config.py::test_zero_offload_optimizer_config_pipeline", + "unit/runtime/test_pld.py::test_pld_schedule[0]", "unit/runtime/test_pld.py::test_pld_schedule[0.9]", + "unit/runtime/test_pld.py::test_pld_schedule[1.0]", "unit/runtime/test_pld.py::test_pld_schedule[0.1]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[1.0]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0.9]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0.1]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources3]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources1]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources2]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources0]", + "unit/elasticity/test_elastic.py::test_basic_10k", + 
"unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_111", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_121", + "unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict0]", + "unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict1]", + "unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict2]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[3]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[1]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[1]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[3]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[facebook/opt-125m-text-generation-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[dslim/bert-base-NER-token-classification-fp32-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG]", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False-roberta-base-fill-mask]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-CG]", + "unit/runtime/half_precision/test_bf16.py::TestZeroEmptyGrad::test", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[roberta-base-fill-mask-fp16-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-CG]" +] + +hpu_promote_tests_4cards = [ + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-True-False-resulting_optimizer9]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-True-False-resulting_optimizer3]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-True-True-resulting_optimizer13]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-True-False-resulting_optimizer1]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-True-True-resulting_optimizer7]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-True-True-resulting_optimizer5]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-True-True-resulting_optimizer15]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-True-False-resulting_optimizer11]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-False-False-resulting_optimizer2]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-False-False-resulting_optimizer8]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-False-False-resulting_optimizer0]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-False-True-resulting_optimizer14]", + 
"unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-False-True-resulting_optimizer12]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-False-True-resulting_optimizer4]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-False-True-resulting_optimizer6]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-False-False-resulting_optimizer10]", + "unit/runtime/half_precision/test_bf16.py::TestZeroSupportedClientOptimizer::test[Adam]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-1]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-1]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-2]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-3]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-2]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-3]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[2]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[1]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[3]", + "unit/compression/test_compression.py::TestCompression::test_mpu_compress", + "unit/launcher/test_run.py::test_parser_errors", "unit/launcher/test_run.py::test_num_plus_parser", + "unit/launcher/test_run.py::test_parser_multinode", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-10]", + "unit/runtime/pipe/test_topology.py::test_topology_3d", + "unit/runtime/pipe/test_topology.py::test_topology_comm_list", + "unit/runtime/test_ds_config_dict.py::test_get_bfloat16_enabled[bfloat16]", + "unit/runtime/test_ds_config_dict.py::test_get_bfloat16_enabled[bf16]", + "unit/runtime/test_ds_config_dict.py::TestNoModel::test", + "unit/runtime/test_ds_config_dict.py::TestDeprecatedDeepScaleConfig::test", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs3[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs1_outputs1[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs1[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_arg_none[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_arg_none[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs2[mask1]", + "unit/launcher/test_ds_arguments.py::test_core_deepscale_arguments", + "unit/launcher/test_ds_arguments.py::test_no_ds_arguments_no_ds_parser", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_missing_latest", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-None]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-scheduler_type2]", + 
"unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-scheduler_type2]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-scheduler_type2]", + "unit/runtime/utils/test_partition.py::test_balance_bert", + "unit/runtime/zero/test_zero_config.py::test_zero_config_offload_configs", + "unit/runtime/zero/test_zero_config.py::test_zero_offload_optimizer_config_pipeline", + "unit/runtime/test_pld.py::test_pld_schedule[0]", "unit/runtime/test_pld.py::test_pld_schedule[0.9]", + "unit/runtime/test_pld.py::test_pld_schedule[1.0]", "unit/runtime/test_pld.py::test_pld_schedule[0.1]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[1.0]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources3]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources1]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources2]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources0]", + "unit/elasticity/test_elastic.py::test_basic_10k", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_111", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_121", + "unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict0]", + "unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict1]", + "unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict2]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[3]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[1]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[1]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[3]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[facebook/opt-125m-text-generation-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[dslim/bert-base-NER-token-classification-fp32-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG]", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False-roberta-base-fill-mask]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-CG]", + "unit/runtime/half_precision/test_bf16.py::TestZeroEmptyGrad::test", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[roberta-base-fill-mask-fp16-CG]", + 
"unit/inference/test_inference.py::TestModelTaskKIFalse::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-CG]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[None-fp16-zero1]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[None-bf16-zero2]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp32-bf16-None]", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[1]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[2]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[1]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[1]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[3]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-False]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[1-False-Adam]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-True]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_base[topo_config2]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[bigscience/bloom-560m-fp16]", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-bigscience/bloom-560m-zero_stage=3-bsz=1]", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-bigscience/bloom-560m-zero_stage=2-bsz=1]", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-facebook/opt-350m-zero_stage=2-bsz=1]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[facebook/opt-125m-fp16]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[EleutherAI/gpt-neo-125M-fp16]", + "unit/linear/test_ctx.py::TestInitTransformers::test_config_init", + "unit/linear/test_quant_param.py::TestQuantParam::test_unsupported_dtypes[dtype1]", + "unit/linear/test_quant_param.py::TestQuantParam::test_move_to_accelerator" +] + +gpu_ci_tests = [ + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[None]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_train_schedule_singlestage", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_schedule_firststage", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[3]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[1]", + "unit/launcher/test_ds_arguments.py::test_no_ds_arguments", + "unit/launcher/test_ds_arguments.py::test_no_ds_enable_argument", + "unit/runtime/test_ds_config_model.py::test_config_base", + "unit/comm/test_dist.py::TestWorldSizeOverrideDistTest::test_world_size_1", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_211", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_122", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG]", + 
"unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-bf16-CG]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[2-2]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_baddim[33-33]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_baddim[0-0]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[1-1]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[32-32]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[5-5]", + "unit/runtime/half_precision/test_bf16.py::TestZeroAllowUntestedOptimizer::test", + "unit/runtime/test_ds_config_dict.py::TestInitNoOptimizer::test", + "unit/runtime/test_ds_config_dict.py::TestDistInit::test", "unit/launcher/test_run.py::test_parser_local", + "unit/launcher/test_run.py::test_parser_mutual_exclusive", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[WarmupLR-params0]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[OneCycle-params2]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[LRRangeTest-params3]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[WarmupDecayLR-params1]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0.001-100]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[0.001-0.1-0.1-21-21]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0-211]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.1-0-10-0]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[0.001-0.1-0-21-21]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0-210]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0.001-101]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.01-0.001-10-100]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.01-0.001-10-101]", + "unit/runtime/utils/test_partition.py::test_float_balanced", + "unit/runtime/utils/test_partition.py::test_int_balanced", + "unit/runtime/utils/test_partition.py::test_easy_balance_uniform", + "unit/runtime/utils/test_partition.py::test_float_midheavy", + "unit/runtime/utils/test_partition.py::test_short_partition_uniform", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings2]", + "unit/autotuning/test_autotuning.py::test_command_line", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings4]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings3]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[None]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings1]", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest", + "unit/runtime/test_pld.py::TestNonPLDModel::test_non_pld_model", + "unit/runtime/zero/test_zero_config.py::test_zero_config_deprecatedfields", + "unit/runtime/zero/test_zero_config.py::test_zero_config_aliasfields", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensor::test_ckpt_non_tensor_output[None]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensor::test_ckpt_non_tensor_input[None]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensorOutputOrdering::test_ckpt_non_tensor_output_ordering[non_tensor_output3]", + 
"unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[Optimizer]", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[optimizer_type2]", + "unit/elasticity/test_elastic.py::test_proper_mbsz", "unit/runtime/pipe/test_topology.py::test_topology_rank_repr", + "unit/runtime/pipe/test_topology.py::test_topology_2d", "unit/runtime/pipe/test_topology.py::test_primes", + "unit/runtime/sparse_tensor/test_csr.py::test_csr_addition_different", + "unit/utils/test_get_optim_files.py::test_get_optim_files[2]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[12]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[24]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[1]", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG]", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-bf16-CG]", + "unit/inference/test_inference.py::TestModelTask::test[gpt2-text-generation-fp16-CG]", + "unit/inference/test_inference.py::TestModelTask::test[gpt2-text-generation-fp16-noCG]" +] + +gpu_promote_tests = [ + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-True-False-resulting_optimizer9]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-True-False-resulting_optimizer3]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-True-True-resulting_optimizer13]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-True-False-resulting_optimizer1]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-True-True-resulting_optimizer7]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-True-True-resulting_optimizer5]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-True-True-resulting_optimizer15]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-True-False-resulting_optimizer11]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-False-False-resulting_optimizer2]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-False-False-resulting_optimizer8]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-False-False-resulting_optimizer0]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-False-True-resulting_optimizer14]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-False-True-resulting_optimizer12]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-False-True-resulting_optimizer4]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-False-True-resulting_optimizer6]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-False-False-resulting_optimizer10]", + "unit/runtime/half_precision/test_bf16.py::TestZeroSupportedClientOptimizer::test[Adam]", + "unit/runtime/half_precision/test_bf16.py::TestZeroSupportedClientOptimizer::test[FusedAdam]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-1]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-1]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-2]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-3]", + 
"unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-2]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-3]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[2]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[1]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[3]", + "unit/compression/test_compression.py::TestCompression::test_mpu_compress", + "unit/launcher/test_run.py::test_parser_errors", "unit/launcher/test_run.py::test_num_plus_parser", + "unit/launcher/test_run.py::test_parser_multinode", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[linear-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[linear-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-15]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[linear-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[linear-15]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-15]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-15]", + "unit/runtime/pipe/test_topology.py::test_topology_3d", + "unit/runtime/pipe/test_topology.py::test_topology_comm_list", + "unit/runtime/test_ds_config_dict.py::test_get_bfloat16_enabled[bfloat16]", + "unit/runtime/test_ds_config_dict.py::test_get_bfloat16_enabled[bf16]", + "unit/runtime/test_ds_config_dict.py::TestNoModel::test", + "unit/runtime/test_ds_config_dict.py::TestDeprecatedDeepScaleConfig::test", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs3[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs2[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs3[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs1_outputs1[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs1[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs1_outputs1[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs1[mask1]", + 
"unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_arg_none[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_arg_none[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs2[mask1]", + "unit/launcher/test_ds_arguments.py::test_core_deepscale_arguments", + "unit/launcher/test_ds_arguments.py::test_no_ds_arguments_no_ds_parser", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_missing_latest", + "unit/compression/test_compression.py::TestCompression::test_conv1d_convertion", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-None]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-None]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-scheduler_type2]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-scheduler_type2]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-None]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-scheduler_type2]", + "unit/runtime/utils/test_partition.py::test_balance_bert", + "unit/runtime/zero/test_zero_config.py::test_zero_config_offload_configs", + "unit/runtime/zero/test_zero_config.py::test_zero_offload_optimizer_config_pipeline", + "unit/runtime/test_pld.py::test_pld_schedule[0]", "unit/runtime/test_pld.py::test_pld_schedule[0.9]", + "unit/runtime/test_pld.py::test_pld_schedule[1.0]", "unit/runtime/test_pld.py::test_pld_schedule[0.1]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[1.0]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0.9]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0.1]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources3]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources1]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources2]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources0]", + "unit/elasticity/test_elastic.py::test_basic_10k", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_111", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_121", + "unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict0]", + "unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict1]", + "unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict2]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[3]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[1]", + 
"unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[1]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[3]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG]", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False-roberta-base-fill-mask]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-CG]", + "unit/runtime/half_precision/test_bf16.py::TestZeroEmptyGrad::test", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-fp16-CG]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-CG]" +] diff --git a/tests/unit/common.py b/tests/unit/common.py index 1774bcfae9ff..966acfa6145b 100644 --- a/tests/unit/common.py +++ b/tests/unit/common.py @@ -23,7 +23,7 @@ from _pytest.fixtures import FixtureLookupError, FixtureFunctionMarker # Worker timeout for tests that hang -DEEPSPEED_TEST_TIMEOUT = int(os.environ.get('DS_UNITTEST_TIMEOUT', '600')) +DEEPSPEED_TEST_TIMEOUT = int(os.environ.get('DEEPSPEED_TEST_TIMEOUT', '600')) def is_rocm_pytorch(): @@ -93,6 +93,15 @@ def set_accelerator_visible(): elif get_accelerator().device_name() == 'npu': npu_smi = subprocess.check_output(['npu-smi', 'info', '-l']) num_accelerators = int(npu_smi.decode('utf-8').strip().split('\n')[0].split(':')[1].strip()) + elif get_accelerator().device_name() == 'hpu': + try: + hl_smi = subprocess.check_output(['hl-smi', "-L"]) + num_accelerators = re.findall(r"Module ID\s+:\s+(\d+)", hl_smi.decode()) + except FileNotFoundError: + sim_list = subprocess.check_output(['ls', '-1', '/dev/accel']) + num_accelerators = re.findall(r"accel(\d+)", sim_list.decode()) + num_accelerators = sorted(num_accelerators, key=int) + os.environ["HABANA_VISIBLE_MODULES"] = ",".join(num_accelerators) else: assert get_accelerator().device_name() == 'cpu' cpu_sockets = int( @@ -450,7 +459,7 @@ def __call__(self, request): world_size = mark.args[0] break else: - world_size = self.world_size + world_size = self._fixture_kwargs.get("world_size", self.world_size) if isinstance(world_size, int): world_size = [world_size] diff --git a/tests/unit/compile_marker.py b/tests/unit/compile_marker.py new file mode 100644 index 000000000000..c66f927c7f9f --- /dev/null +++ b/tests/unit/compile_marker.py @@ -0,0 +1,603 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +compile_tests_4c = [ + "unit/runtime/compile/test_compile_wrapper.py::TestCustomMethod::test_custom_function", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-2-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-2-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-1-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-2-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-3-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-2-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-2-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-2-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-3-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-1-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-1-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-1-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-2-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-3-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-1-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-1-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-1-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-2-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-1-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-1-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-2-dtype0]", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_custom_backend", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_set_compiler_fn", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile_disabled", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile_kwargs", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_set_compile_kwargs", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest[True]", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_missing_latest[True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-True-True]", + 
"unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[1-False-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[1-False-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-False-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[0-False-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[0-False-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-True-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-False-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-False-True]", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[0-True]", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[1-True]", + "unit/checkpoint/test_shared_weights.py::TestCheckpointSharedWeights::test_checkpoint_shared_weights[True]", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True-True]", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False-True]", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False-True]", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True-True]", + "unit/checkpoint/test_tag_validation.py::TestCheckpointValidationTag::test_checkpoint_unique_tag[WARN-True]", + "unit/checkpoint/test_tag_validation.py::TestCheckpointValidationTag::test_checkpoint_unique_tag[IGNORE-True]", + "unit/checkpoint/test_tag_validation.py::TestCheckpointValidationTag::test_checkpoint_unique_tag[FAIL-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-False-Adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[1-False-Adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[0-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[1-False-Adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-True-deepspeed_adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-False-Adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-True-deepspeed_adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-False-Adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[3-True]", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_pipeline_checkpoint_loading[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-False-Adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[0-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[0-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[0-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[2-True]", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeRONonDistributed::test_chmod_exception_handling[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeRONonDistributed::test_chmod_exception_handling[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeRONonDistributed::test_chmod_exception_handling[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-False-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-False-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-False-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-False-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-False-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-False-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-False-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-False-True]", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest[True]", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_missing_latest[True]", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fp32_optimizer[True]", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fused_optimizer[True]", + "unit/checkpoint/test_tag_validation.py::TestCheckpointValidationTag::test_checkpoint_unique_tag[WARN-True]", + "unit/checkpoint/test_tag_validation.py::TestCheckpointValidationTag::test_checkpoint_unique_tag[FAIL-True]", + "unit/checkpoint/test_tag_validation.py::TestCheckpointValidationTag::test_checkpoint_unique_tag[IGNORE-True]", + 
"unit/checkpoint/test_tag_validation.py::TestCheckpointValidationTag::test_checkpoint_unknown_tag_validation[True]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-True-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-False-1-dtype1]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-False-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-True-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-True-1-dtype1]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-False-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-False-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-True-1-dtype1]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-True-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-False-1-dtype1]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-False-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-False-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-True-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-True-1-dtype1]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-False-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-True-1-dtype1]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-False-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-True-1-dtype1]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-False-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-False-1-dtype1]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-False-1-dtype0]", + 
"unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-True-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-False-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-False-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-False-1-dtype1]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-False-1-dtype1]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-True-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-False-1-dtype1]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-False-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-True-1-dtype1]", + "unit/checkpoint/test_sparse.py::TestSparseCheckpoint::test_non_strict_load_sparse[True-True-True-False-False]", + "unit/checkpoint/test_sparse.py::TestSparseCheckpoint::test_non_strict_load_sparse[True-True-False-False-False]", + "unit/checkpoint/test_sparse.py::TestSparseCheckpoint::test_non_strict_load_sparse[True-True-True-True-True]", + "unit/checkpoint/test_sparse.py::TestSparseCheckpoint::test_non_strict_load_sparse[True-False-False-False-False]", + "unit/checkpoint/test_sparse.py::TestSparseCheckpoint::test_non_strict_load_sparse[True-True-True-True-False]", + "unit/checkpoint/test_sparse.py::TestSparseCheckpoint::test_non_strict_load_sparse[True-True-False-True-True]", + "unit/checkpoint/test_sparse.py::TestSparseCheckpoint::test_non_strict_load_sparse[True-True-False-True-False]", + "unit/checkpoint/test_sparse.py::TestSparseCheckpoint::test_non_strict_load_sparse[True-False-False-True-True]", + "unit/checkpoint/test_sparse.py::TestSparseCheckpoint::test_non_strict_load_sparse[True-False-False-True-False]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[facebook/opt-125m-fp16-True]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[EleutherAI/gpt-j-6B-fp16-True]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[EleutherAI/gpt-neo-125M-fp16-True]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[facebook/opt-350m-fp16-True]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[facebook/opt-125m-True]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[bigscience/bloom-560m-True]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[EleutherAI/gpt-neo-125M-True]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[EleutherAI/gpt-j-6B-True]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[facebook/opt-350m-True]", + "unit/inference/test_inference.py::TestLowCpuMemUsage::test[gpt2-True]", + "unit/inference/test_inference.py::TestMPSize::test[fp32-bloom-True]", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-j-True]", + 
"unit/inference/test_inference.py::TestMPSize::test[fp16-bloom-True]", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-neo-True]", + "unit/inference/test_inference.py::TestMPSize::test[fp16-gpt-neo-True]", + "unit/inference/test_inference.py::TestMPSize::test[fp16-gpt-neox-True]", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-neox-True]", + "unit/inference/test_inference.py::TestMPSize::test[fp16-gpt-j-True]", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[fp16-marian-True-True]", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[fp16-marian-False-True]", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[fp16-marian-True-True]", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[bf16-marian-False-True]", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[bf16-marian-True-True]", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[bf16-marian-False-True]", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[bf16-marian-True-True]", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[fp16-marian-False-True]", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-EleutherAI/gpt-neo-2.7B-True]", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl-True]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[bigscience/bloom-560m-fp16-True]", + "unit/inference/test_inference.py::TestInjectionPolicy::test[ws1-fp32-roberta-True]", + "unit/inference/test_inference.py::TestInjectionPolicy::test[ws1-fp32-t5-True]", + "unit/inference/test_inference.py::TestInjectionPolicy::test[ws2-fp32-roberta-True]", + "unit/inference/test_inference.py::TestInjectionPolicy::test[ws2-fp32-t5-True]", +] +compile_tests_1c = [ + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-noCG-noTriton-False-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-Triton-True-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilbert-base-cased-distilled-squad-question-answering-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-Triton-False-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilbert-base-cased-distilled-squad-question-answering-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp16-CG-noTriton-True-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp32-CG-noTriton-True-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilbert-base-cased-distilled-squad-question-answering-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilbert-base-cased-distilled-squad-question-answering-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-Triton-False-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilbert-base-cased-distilled-squad-question-answering-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilbert-base-cased-distilled-squad-question-answering-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilbert-base-cased-distilled-squad-question-answering-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-False-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilbert-base-cased-distilled-squad-question-answering-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilbert-base-cased-distilled-squad-question-answering-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton-False-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilbert-base-cased-distilled-squad-question-answering-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilbert-base-cased-distilled-squad-question-answering-fp32-noCG-Triton-True-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-Triton-False-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilbert-base-cased-distilled-squad-question-answering-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilbert-base-cased-distilled-squad-question-answering-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-CG-noTriton-False-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilbert-base-cased-distilled-squad-question-answering-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-False-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilbert-base-cased-distilled-squad-question-answering-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp16-noCG-Triton-False-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-False-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilbert-base-cased-distilled-squad-question-answering-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp32-noCG-noTriton-False-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-Triton-True-True]", +] diff --git a/tests/unit/inference/test_checkpoint_sharding.py b/tests/unit/inference/test_checkpoint_sharding.py index 5bae9a151a27..1466be17e134 100644 --- a/tests/unit/inference/test_checkpoint_sharding.py +++ b/tests/unit/inference/test_checkpoint_sharding.py @@ -14,6 +14,7 @@ from huggingface_hub import snapshot_download from transformers.utils import is_offline_mode from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.accelerator import get_accelerator if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("This op had not been implemented on this system.", allow_module_level=True) @@ -44,6 +45,8 @@ def model_name(request): @pytest.fixture(params=[torch.float16, torch.int8], ids=["fp16", "int8"]) def dtype(request): + if request.param not in get_accelerator().supported_dtypes(): + pytest.skip(f"{request.param} not supported by {get_accelerator().device_name()}.") return request.param @@ -73,7 +76,9 @@ def run(self, model_name, class_tmpdir): class TestCheckpointShard(DistributedTest): world_size = 2 - def test(self, model_name, dtype, class_tmpdir, save_shard): + @pytest.mark.parametrize('compile_mode', [True, False]) + def test(self, model_name, dtype, class_tmpdir, save_shard, compile_mode): + world_size = int(os.getenv("WORLD_SIZE", "1")) inf_config = { "replace_with_kernel_inject": True, @@ -92,6 +97,8 @@ def test(self, model_name, dtype, class_tmpdir, save_shard): model = AutoModelForCausalLM.from_config(model_config, torch_dtype=torch.bfloat16) model = model.eval() model = deepspeed.init_inference(model, config=inf_config) + if compile_mode: + model.compile() check_dtype(model, dtype) @@ -99,7 +106,8 @@ def test(self, model_name, dtype, class_tmpdir, save_shard): class TestCheckpointShardinAutoTP(DistributedTest): world_size = 2 - def test(self, model_name, class_tmpdir): + @pytest.mark.parametrize('compile_mode', [True, False]) + def test(self, model_name, class_tmpdir, compile_mode): def write_checkpoints_json(model_name, class_tmpdir): import json @@ -137,3 +145,5 @@ def write_checkpoints_json(model_name, class_tmpdir): model = AutoModelForCausalLM.from_config(model_config, torch_dtype=torch.bfloat16) model = model.eval() model = deepspeed.init_inference(model, config=inf_config) + if compile_mode: + model.compile() diff --git a/tests/unit/inference/test_inference.py 
b/tests/unit/inference/test_inference.py index 36003319856c..e13f2e8e7dc7 100644 --- a/tests/unit/inference/test_inference.py +++ b/tests/unit/inference/test_inference.py @@ -30,6 +30,7 @@ from deepspeed.ops.op_builder import OpBuilder from unit.common import DistributedTest +from transformers import BertLayer rocm_version = OpBuilder.installed_rocm_version() if rocm_version != (0, 0): @@ -67,6 +68,36 @@ "facebook/opt-125m", # 125m, 1.7B, ..., 175B variants have the same model architecture. "facebook/opt-350m", # 350m applies layer norm after attention layer which is different than other variants. ] +ModelsInjectionPolicyMap = { + "distilbert/distilbert-base-cased-distilled-squad": { + BertLayer: ("output_layer_norm", ) + }, + "openai-community/gpt2": { + BertLayer: ("mlp", ) + }, + "distilbert/distilgpt2": { + BertLayer: ("mlp", ) + }, + "Norod78/hebrew-bad_wiki-gpt_neo-tiny": { + BertLayer: ("out_proj", ) + }, + "EleutherAI/gpt-j-6b": { + BertLayer: ("mlp", ) + }, + "EleutherAI/pythia-70m-deduped": { + BertLayer: ("mlp", ) + }, + "bigscience/bloom-560m": { + BertLayer: ("mlp", ) + }, + "facebook/opt-125m": { + BertLayer: ("out_proj", ) + }, + "facebook/opt-350m": { + BertLayer: ("out_proj", ) + }, +} +DEFAULT_INJECTION_POLICY = {BertLayer: ("output.dense", )} _test_models = set(_bert_models + _roberta_models + _gpt_models + _opt_models) _test_tasks = [ "fill-mask", "question-answering", "text-classification", "token-classification", "text-generation", @@ -86,7 +117,8 @@ def _hf_model_list() -> List[ModelInfo]: cache_dir = os.getenv("HF_HOME", "~/.cache/huggingface") cache_file_path = os.path.join(cache_dir, "DS_model_cache.pkl") - cache_expiration_seconds = 60 * 60 * 24 # 1 day + num_days = int(os.getenv("HF_CACHE_EXPIRY_DAYS", 1)) + cache_expiration_seconds = num_days * 60 * 60 * 24 # Load or initialize the cache model_data = {"cache_time": 0, "model_list": []} @@ -97,7 +129,8 @@ def _hf_model_list() -> List[ModelInfo]: current_time = time.time() # Update the cache if it has expired - if (model_data["cache_time"] + cache_expiration_seconds) < current_time: + if ((model_data["cache_time"] + cache_expiration_seconds) < current_time) or os.getenv("FORCE_UPDATE_HF_CACHE", + default=False): api = HfApi() model_data["model_list"] = [ ModelInfo(modelId=m.modelId, pipeline_tag=m.pipeline_tag, tags=m.tags) for m in api.list_models() @@ -125,6 +158,7 @@ def _hf_model_list() -> List[ModelInfo]: pytest.mt_names = [f"{m}-{t}" for m, t in pytest.model_w_tasks] +# Workaround: Hugging Face model listings were updated, so some entries in _test_models are no longer found in _hf_model_names; the fixture below was therefore changed from True to False.
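The ModelsInjectionPolicyMap and DEFAULT_INJECTION_POLICY added above are consumed further down in TestModelTask when replace_with_kernel_inject is False; a minimal sketch of that flow, assuming an illustrative model name and a hypothetical pipe object returned by transformers.pipeline, looks like this:
# Sketch only -- "pipe", the model name, and the dtype are placeholders, not part of this patch.
injection_policy = ModelsInjectionPolicyMap.get("facebook/opt-125m", DEFAULT_INJECTION_POLICY)
pipe.model = deepspeed.init_inference(pipe.model,
                                      mp_size=1,
                                      dtype=torch.float32,
                                      replace_with_kernel_inject=False,
                                      injection_policy=injection_policy)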
@pytest.fixture(scope="module", autouse=True) def verify_models(): # Verify all test models are registered in HF @@ -162,6 +196,11 @@ def enable_triton(request): return request.param +@pytest.fixture(params=[1, 2], ids=["ws1", "ws2"]) +def world_size(request): + return request.param + + """ Fixtures for running query """ @@ -275,11 +314,19 @@ def verify_injection(module): verify_injection(model) +# Used to Get Device name +def getDeviceId(local_rank): + device = local_rank + if get_accelerator().device_name() != 'cuda': + device = torch.device(f"{get_accelerator().device_name()}") + return device + + # Verify that test is valid def validate_test(model_w_task, dtype, enable_cuda_graph, enable_triton): model, task = model_w_task msg = "" - if enable_cuda_graph and (torch_info["cuda_version"] == "0.0"): + if enable_cuda_graph and (torch_info["cuda_version"] == "0.0") and get_accelerator().device_name() != 'hpu': msg = "CUDA not detected, cannot use CUDA Graph" elif enable_cuda_graph and pkg_version.parse(torch.__version__) < pkg_version.parse("1.10"): msg = "CUDA Graph is only available in torch versions >= 1.10" @@ -296,6 +343,8 @@ def validate_test(model_w_task, dtype, enable_cuda_graph, enable_triton): msg = f"Bloom models only support half precision, cannot use dtype {dtype}" elif (model not in _bert_models + _roberta_models) and enable_cuda_graph: msg = "Non bert/roberta models do no support CUDA Graph" + elif not get_accelerator().is_triton_supported() and enable_triton: + msg = f"Triton is not supported for {get_accelerator().device_name()}." elif enable_triton and not (dtype in [torch.half]): msg = "Triton is for fp16" elif enable_triton and not deepspeed.HAS_TRITON: @@ -311,7 +360,9 @@ def validate_test(model_w_task, dtype, enable_cuda_graph, enable_triton): return msg -@pytest.mark.inference +@pytest.mark.parametrize('compile_mode', [True, False]) +@pytest.mark.parametrize("replace_with_kernel_inject", [True, False]) +@pytest.mark.nightly class TestModelTask(DistributedTest): world_size = 1 @@ -324,6 +375,8 @@ def test( query, inf_kwargs, assert_fn, + replace_with_kernel_inject, + compile_mode, perf_meas=True, ): invalid_test_msg = validate_test(model_w_task, dtype, enable_cuda_graph, enable_triton) @@ -366,11 +419,20 @@ def test( 'use_triton': enable_triton, 'triton_autotune': False, } + if not replace_with_kernel_inject: + if get_accelerator().device_name() != 'hpu': + pytest.skip("Kernel Inject False validation for HPU tests.", ) + injection_policy = ModelsInjectionPolicyMap.get(model, DEFAULT_INJECTION_POLICY) + args['injection_policy'] = injection_policy + args['replace_with_kernel_inject'] = False if pipe.tokenizer.model_max_length < deepspeed.ops.transformer.inference.config.DeepSpeedInferenceConfig( ).max_out_tokens: args.update({'max_out_tokens': pipe.tokenizer.model_max_length}) pipe.model = deepspeed.init_inference(pipe.model, **args) - check_injection(pipe.model) + if compile_mode: + pipe.model.compile() + if replace_with_kernel_inject: + check_injection(pipe.model) # Warm-up queries for perf measurement #for i in range(10): # _ = pipe(query, **inf_kwargs) @@ -397,6 +459,7 @@ def test( assert assert_fn(bs_output, ds_output) +@pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.seq_inference @pytest.mark.parametrize("model_w_task", [("EleutherAI/gpt-neo-1.3B", "text-generation"), ("EleutherAI/gpt-neox-20b", "text-generation"), @@ -413,6 +476,7 @@ def test( query, inf_kwargs, assert_fn, + compile_mode, ): invalid_test_msg = validate_test(model_w_task, 
dtype, enable_cuda_graph=False, enable_triton=False) if invalid_test_msg: @@ -433,6 +497,8 @@ def test( mp_size=self.world_size, dtype=dtype, replace_with_kernel_inject=True) + if compile_mode: + pipe.model.compile() check_injection(pipe.model) # Switch device to GPU so that input tensors are not on CPU pipe.device = torch.device(get_accelerator().device_name(local_rank)) @@ -443,6 +509,7 @@ def test( assert assert_fn(bs_output, ds_output) +@pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.inference @pytest.mark.parametrize("model_w_task", [("openai-community/gpt2", "text-generation")], ids=["gpt2"]) class TestLowCpuMemUsage(DistributedTest): @@ -454,6 +521,7 @@ def test( query, inf_kwargs, assert_fn, + compile_mode, ): model, task = model_w_task dtype = torch.float16 @@ -461,20 +529,22 @@ def test( pytest.skip(f"Acceleraor {get_accelerator().device_name()} does not support {dtype}.") local_rank = int(os.getenv("LOCAL_RANK", "0")) - - pipe = pipeline(task, model=model, model_kwargs={"low_cpu_mem_usage": True}, device=local_rank, framework="pt") + device = getDeviceId(local_rank) + pipe = pipeline(task, model=model, model_kwargs={"low_cpu_mem_usage": True}, device=device, framework="pt") bs_output = pipe(query, **inf_kwargs) pipe.model = deepspeed.init_inference(pipe.model, mp_size=self.world_size, dtype=dtype, replace_method="auto", replace_with_kernel_inject=True) - + if compile_mode: + pipe.model.compile() ds_output = pipe(query, **inf_kwargs) assert assert_fn(bs_output, ds_output) +@pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.seq_inference @pytest.mark.parametrize( "model_w_task, injection_policy", @@ -490,7 +560,6 @@ def test( ) @pytest.mark.parametrize("dtype", [torch.float], ids=["fp32"]) class TestInjectionPolicy(DistributedTest): - world_size = [1, 2] def test( self, @@ -500,6 +569,8 @@ def test( inf_kwargs, assert_fn, dtype, + world_size, + compile_mode, ): invalid_test_msg = validate_test(model_w_task, dtype, enable_cuda_graph=False, enable_triton=False) if invalid_test_msg: @@ -507,7 +578,6 @@ def test( model, task = model_w_task local_rank = int(os.getenv("LOCAL_RANK", "0")) - world_size = int(os.getenv("WORLD_SIZE", "2")) pipe = pipeline(task, model=model, @@ -519,6 +589,8 @@ def test( mp_size=world_size, dtype=dtype, injection_policy=injection_policy) + if compile_mode: + pipe.model.compile() ds_output = pipe(query, **inf_kwargs) print(local_rank, "baseline", bs_output) @@ -526,7 +598,9 @@ def test( assert assert_fn(bs_output, ds_output) +@pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.seq_inference +@pytest.mark.parametrize('keep_module_on_host', [True, False]) @pytest.mark.parametrize( "model_w_task", [("Helsinki-NLP/opus-mt-en-de", "translation"), ("Salesforce/codegen-350M-mono", "text-generation")], @@ -543,6 +617,8 @@ def test( inf_kwargs, assert_fn, dtype, + compile_mode, + keep_module_on_host, ): invalid_test_msg = validate_test(model_w_task, dtype, enable_cuda_graph=False, enable_triton=False) if invalid_test_msg: @@ -565,7 +641,12 @@ def test( framework="pt") bs_output = pipe(query, **inf_kwargs) - pipe.model = deepspeed.init_inference(pipe.model, mp_size=world_size, dtype=dtype) + pipe.model = deepspeed.init_inference(pipe.model, + mp_size=world_size, + dtype=dtype, + keep_module_on_host=keep_module_on_host) + if compile_mode: + pipe.model.compile() ds_output = pipe(query, **inf_kwargs) print(local_rank, "baseline", bs_output) @@ -580,6 +661,8 @@ def test_odd_world_size( inf_kwargs, assert_fn, dtype, + 
compile_mode, + keep_module_on_host, ): invalid_test_msg = validate_test(model_w_task, dtype, enable_cuda_graph=False, enable_triton=False) if invalid_test_msg: @@ -597,7 +680,12 @@ def test_odd_world_size( framework="pt") bs_output = pipe(query, **inf_kwargs) - pipe.model = deepspeed.init_inference(pipe.model, mp_size=world_size, dtype=dtype) + pipe.model = deepspeed.init_inference(pipe.model, + mp_size=world_size, + dtype=dtype, + keep_module_on_host=keep_module_on_host) + if compile_mode: + pipe.model.compile() ds_output = pipe(query, **inf_kwargs) print(local_rank, "baseline", bs_output) @@ -605,6 +693,7 @@ def test_odd_world_size( assert assert_fn(bs_output, ds_output) +@pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.nightly @pytest.mark.parametrize( "model_family, model_name", @@ -619,7 +708,7 @@ class TestLMCorrectness(DistributedTest): world_size = 1 exec_timeout = 1200 # Give these tests longer to complete - def test(self, model_family, model_name, task): + def test(self, model_family, model_name, task, compile_mode): # imports here to avoid import errors when pytest collects tests import lm_eval import lm_eval.models @@ -650,7 +739,7 @@ def no_pool_bootstrap_stderr(f, xs, iters): dtype = torch.half lm = lm_eval.models.get_model(model_family).create_from_arg_string(f"pretrained={model_name}", {"device": "cpu"}) - setattr(lm, model_family, getattr(lm, model_family).half().to(device)) + setattr(lm, model_family, getattr(lm, model_family).to(dtype=dtype).to(device)) lm._device = device else: if get_accelerator().device_name() == 'hpu': @@ -677,6 +766,8 @@ def no_pool_bootstrap_stderr(f, xs, iters): replace_with_kernel_inject=True, enable_cuda_graph=False, ) + if compile_mode: + ds_model.compile() check_injection(ds_model) setattr(lm, model_family, ds_model) get_accelerator().synchronize() diff --git a/tests/unit/inference/test_model_profiling.py b/tests/unit/inference/test_model_profiling.py index 23e49f89025b..319055d0ea55 100644 --- a/tests/unit/inference/test_model_profiling.py +++ b/tests/unit/inference/test_model_profiling.py @@ -16,6 +16,9 @@ if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("This op had not been implemented on this system.", allow_module_level=True) +if torch.half not in get_accelerator().supported_dtypes(): + pytest.skip(f"fp16 not supported, valid dtype: {get_accelerator().supported_dtypes()}", allow_module_level=True) + @pytest.mark.inference @pytest.mark.parametrize("use_cuda_events", [True, False]) diff --git a/tests/unit/inference/v2/inference_test_utils.py b/tests/unit/inference/v2/inference_test_utils.py index d63c51267e51..9405b6fde724 100644 --- a/tests/unit/inference/v2/inference_test_utils.py +++ b/tests/unit/inference/v2/inference_test_utils.py @@ -44,3 +44,10 @@ def allclose(x, y, tolerances: Tuple[int, int] = None): else: rtol, atol = tolerances return torch.allclose(x, y, rtol=rtol, atol=atol) + + +def skip_on_inference_v2(): + if get_accelerator().device_name() == 'hpu': + return True + else: + return False diff --git a/tests/unit/inference/v2/kernels/core_ops/test_bias_activation.py b/tests/unit/inference/v2/kernels/core_ops/test_bias_activation.py index 376188b92565..49dbdc715556 100644 --- a/tests/unit/inference/v2/kernels/core_ops/test_bias_activation.py +++ b/tests/unit/inference/v2/kernels/core_ops/test_bias_activation.py @@ -11,7 +11,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.inference_utils import ActivationType, DtypeEnum from 
deepspeed.inference.v2.kernels.core_ops import CUDABiasActivation -from ....v2.inference_test_utils import get_dtypes, allclose +from ....v2.inference_test_utils import get_dtypes, allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_bias_act_implementation(input: torch.Tensor, bias: Optional[torch.Tensor], diff --git a/tests/unit/inference/v2/kernels/core_ops/test_blas_linear.py b/tests/unit/inference/v2/kernels/core_ops/test_blas_linear.py index 864db6204a16..9d8d2c177607 100644 --- a/tests/unit/inference/v2/kernels/core_ops/test_blas_linear.py +++ b/tests/unit/inference/v2/kernels/core_ops/test_blas_linear.py @@ -10,7 +10,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.kernels.core_ops import BlasLibLinear -from ....v2.inference_test_utils import allclose +from ....v2.inference_test_utils import allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') # Note: only testing with FP16 and BF16 because we use TF32 on Ampere and we don't have a good # set of tolerances. Since this is just on top of BLAS though, the test is more about diff --git a/tests/unit/inference/v2/kernels/core_ops/test_gated_activation.py b/tests/unit/inference/v2/kernels/core_ops/test_gated_activation.py index 8cb95a6cdcba..4232cb7a6324 100644 --- a/tests/unit/inference/v2/kernels/core_ops/test_gated_activation.py +++ b/tests/unit/inference/v2/kernels/core_ops/test_gated_activation.py @@ -11,7 +11,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.kernels.core_ops import CUDAGatedActivation from deepspeed.inference.v2.inference_utils import ActivationType -from ....v2.inference_test_utils import get_dtypes, allclose +from ....v2.inference_test_utils import get_dtypes, allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_geglu_implementation(input: torch.Tensor, diff --git a/tests/unit/inference/v2/kernels/core_ops/test_post_ln.py b/tests/unit/inference/v2/kernels/core_ops/test_post_ln.py index 0b489894bb9b..0549316081ee 100644 --- a/tests/unit/inference/v2/kernels/core_ops/test_post_ln.py +++ b/tests/unit/inference/v2/kernels/core_ops/test_post_ln.py @@ -8,7 +8,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.kernels.core_ops import CUDAFPPostLN -from ....v2.inference_test_utils import get_dtypes, allclose +from ....v2.inference_test_utils import get_dtypes, allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_implementation(residual: torch.Tensor, hidden_states: torch.Tensor, gamma: torch.Tensor, diff --git a/tests/unit/inference/v2/kernels/core_ops/test_pre_ln.py b/tests/unit/inference/v2/kernels/core_ops/test_pre_ln.py index ffb748e57af2..4da5173d5f53 100644 --- a/tests/unit/inference/v2/kernels/core_ops/test_pre_ln.py +++ b/tests/unit/inference/v2/kernels/core_ops/test_pre_ln.py @@ -8,7 +8,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.kernels.core_ops import CUDAFPPreLN -from ....v2.inference_test_utils import get_dtypes, allclose +from ....v2.inference_test_utils import get_dtypes, 
allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_implementation(residual: torch.Tensor, hidden_states: torch.Tensor, gamma: torch.Tensor, diff --git a/tests/unit/inference/v2/kernels/core_ops/test_rms_norm.py b/tests/unit/inference/v2/kernels/core_ops/test_rms_norm.py index 63b16da171c9..16357d0f2967 100644 --- a/tests/unit/inference/v2/kernels/core_ops/test_rms_norm.py +++ b/tests/unit/inference/v2/kernels/core_ops/test_rms_norm.py @@ -9,7 +9,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.inference_utils import DtypeEnum from deepspeed.inference.v2.kernels.core_ops import CUDARMSNorm, CUDARMSPreNorm -from ....v2.inference_test_utils import get_dtypes, allclose +from ....v2.inference_test_utils import get_dtypes, allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_rms_norm(vals: torch.Tensor, gamma: torch.Tensor, epsilon: float = 1e-5) -> torch.Tensor: diff --git a/tests/unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py b/tests/unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py index ed76dabe1f4c..c396cc1268d0 100644 --- a/tests/unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py +++ b/tests/unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py @@ -9,7 +9,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.inference_utils import ActivationType, DtypeEnum from deepspeed.inference.v2.kernels.cutlass_ops import MoEGEMM -from ....v2.inference_test_utils import allclose +from ....v2.inference_test_utils import allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') SINGLE_EXPERT_CASES = [(13, 2048, 2048), (256, 1024, 4096), (278, 5120, 2048), (893, 5120, 2560)] diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_atom_builder.py b/tests/unit/inference/v2/kernels/ragged_ops/test_atom_builder.py index a33c938a0608..ad88ff2c5d69 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_atom_builder.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_atom_builder.py @@ -6,8 +6,13 @@ import pytest import torch +from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.kernels.ragged_ops import AtomBuilder from .ragged_testing_utils import build_complex_batch +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') Q_BLOCK_SIZE = 128 KV_BLOCK_SIZE = 128 diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py b/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py index ce5a178c9548..bb0192bbbde6 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py @@ -23,7 +23,10 @@ from deepspeed.ops.op_builder import RaggedUtilsBuilder from .ragged_testing_utils import build_batch_and_manager -from ....v2.inference_test_utils import allclose +from ....v2.inference_test_utils import allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') try: from 
flash_attn.flash_attn_interface import flash_attn_varlen_func diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py b/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py index 5f1ef930952c..03562e48390c 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py @@ -9,6 +9,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.kernels.ragged_ops import LinearBlockedKVCopy from .ragged_testing_utils import build_batch_and_manager, validate_kv_cache +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') @pytest.mark.inference_v2_ops diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py b/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py index 156be9929d92..06d67777e65d 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py @@ -12,7 +12,10 @@ from deepspeed.inference.v2.kernels.ragged_ops import BlockedRotaryEmbeddings, BlockedTrainedRotaryEmbeddings from deepspeed.inference.v2.ragged import RaggedBatchWrapper, DSSequenceDescriptor from .ragged_testing_utils import build_batch_and_manager, validate_kv_cache -from ....v2.inference_test_utils import allclose +from ....v2.inference_test_utils import allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') """ NOTE(cmikeh2): It is very possible to see unit test failures (even on FP16) depending on when certain values are casted up to or down from float32. 
If we are seeing accuracy issues, we should diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_logits_gather.py b/tests/unit/inference/v2/kernels/ragged_ops/test_logits_gather.py index 1feefa9ee588..e00aa85d194c 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_logits_gather.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_logits_gather.py @@ -10,9 +10,12 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.kernels.ragged_ops import RaggedLogitsGather -from ....v2.inference_test_utils import allclose, get_dtypes +from ....v2.inference_test_utils import allclose, get_dtypes, skip_on_inference_v2 from .ragged_testing_utils import build_simple_batch +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') + def baseline_implementation(hidden_states: torch.Tensor, seq_lens: List[int]) -> torch.Tensor: output = torch.empty((len(seq_lens), hidden_states.shape[1]), diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_moe_gather.py b/tests/unit/inference/v2/kernels/ragged_ops/test_moe_gather.py index 3907fc3e3a4b..6538a81ec00a 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_moe_gather.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_moe_gather.py @@ -14,6 +14,10 @@ RaggedTopKGating, ) from .ragged_testing_utils import build_simple_batch +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') """ For simplicity's sake, these tests do rely on ``RaggedTopKGating`` and ``MoEScatter`` to produce correct inputs. If either of these kernels is broken diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py b/tests/unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py index aae459f06a6f..9edc014eae33 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py @@ -10,6 +10,10 @@ from deepspeed.inference.v2.inference_utils import DtypeEnum from deepspeed.inference.v2.kernels.ragged_ops import MoEScatter, RaggedTopKGating from .ragged_testing_utils import build_simple_batch +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') """ For simplicity's sake, these tests do rely on ``RaggedTopKGating`` to produce correct inputs. 
If ``RaggedTopKGating`` is broken, these tests will fail, so double check diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py b/tests/unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py index f179f62a9b12..32d7d312a4cf 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py @@ -10,9 +10,12 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.kernels.ragged_ops import RaggedEmbeddingKernel -from ....v2.inference_test_utils import allclose, get_dtypes +from ....v2.inference_test_utils import allclose, get_dtypes, skip_on_inference_v2 from .ragged_testing_utils import build_batch_and_manager +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') + def baseline_implementation(token_ids: torch.Tensor, embedding_table: torch.Tensor, diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_top_k_gating.py b/tests/unit/inference/v2/kernels/ragged_ops/test_top_k_gating.py index 5fa0c8a079f0..178512351c0f 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_top_k_gating.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_top_k_gating.py @@ -11,7 +11,10 @@ from deepspeed.inference.v2.inference_utils import DtypeEnum from deepspeed.inference.v2.kernels.ragged_ops import RaggedTopKGating from .ragged_testing_utils import build_simple_batch -from ...inference_test_utils import allclose +from ....v2.inference_test_utils import allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def _top_k_gating_testing_helper(n_tokens: int, n_experts: int, n_top_k: int, seed: int = 0xC0FFEE) -> None: diff --git a/tests/unit/inference/v2/model_implementations/parameters/test_contiguify.py b/tests/unit/inference/v2/model_implementations/parameters/test_contiguify.py index 52ff0e134dfc..901d9d9b43e7 100644 --- a/tests/unit/inference/v2/model_implementations/parameters/test_contiguify.py +++ b/tests/unit/inference/v2/model_implementations/parameters/test_contiguify.py @@ -15,6 +15,10 @@ ) from deepspeed.inference.v2.model_implementations.layer_container_base import LayerContainer from .utils import SimpleParam, DummyInferenceModel +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') class TransformerLayerContainer(LayerContainer): diff --git a/tests/unit/inference/v2/model_implementations/parameters/test_layer_inheritance.py b/tests/unit/inference/v2/model_implementations/parameters/test_layer_inheritance.py index 07ad87e6168d..c457227d5499 100644 --- a/tests/unit/inference/v2/model_implementations/parameters/test_layer_inheritance.py +++ b/tests/unit/inference/v2/model_implementations/parameters/test_layer_inheritance.py @@ -6,10 +6,15 @@ import pytest import torch +from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.inference_parameter import InferenceParameter from deepspeed.inference.v2.model_implementations.layer_container_base import LayerContainer from .utils import SimpleParam, DummyInferenceModel +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by 
{get_accelerator().device_name()}.') class ParentLayer(LayerContainer): diff --git a/tests/unit/inference/v2/model_implementations/parameters/test_mapping.py b/tests/unit/inference/v2/model_implementations/parameters/test_mapping.py index 52313cb6f202..0701b8dcc4d8 100644 --- a/tests/unit/inference/v2/model_implementations/parameters/test_mapping.py +++ b/tests/unit/inference/v2/model_implementations/parameters/test_mapping.py @@ -6,10 +6,15 @@ import pytest import torch +from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.allocator import on_device from deepspeed.inference.v2.inference_parameter import InferenceParameter from deepspeed.inference.v2.model_implementations.parameter_base import ParameterBase, ParamList from deepspeed.inference.v2.model_implementations.layer_container_base import LayerContainer +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') class MultiDependencyContainer(ParameterBase): diff --git a/tests/unit/inference/v2/model_implementations/parameters/test_multi_parameter_layer.py b/tests/unit/inference/v2/model_implementations/parameters/test_multi_parameter_layer.py index b319bf6de4ad..e7ba08b3c2a8 100644 --- a/tests/unit/inference/v2/model_implementations/parameters/test_multi_parameter_layer.py +++ b/tests/unit/inference/v2/model_implementations/parameters/test_multi_parameter_layer.py @@ -6,10 +6,15 @@ import pytest import torch +from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.inference_parameter import InferenceParameter from deepspeed.inference.v2.model_implementations.layer_container_base import LayerContainer from .utils import validate_device, SimpleParam, ListParam, DummyInferenceModel +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') class MultiParameterLayer(LayerContainer): diff --git a/tests/unit/inference/v2/model_implementations/parameters/test_parameter_list.py b/tests/unit/inference/v2/model_implementations/parameters/test_parameter_list.py index 06ff9047d648..5f39d3251ea9 100644 --- a/tests/unit/inference/v2/model_implementations/parameters/test_parameter_list.py +++ b/tests/unit/inference/v2/model_implementations/parameters/test_parameter_list.py @@ -6,6 +6,7 @@ import pytest import torch +from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.allocator import on_device from deepspeed.inference.v2.inference_parameter import InferenceParameter from deepspeed.inference.v2.model_implementations.parameter_base import ParameterBase, ParamList @@ -13,6 +14,10 @@ from deepspeed.inference.v2.model_implementations.common_parameters import * from .utils import validate_device +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') class SimpleMoELayer(LayerContainer): diff --git a/tests/unit/inference/v2/model_implementations/sharding/test_attn_out_sharding.py b/tests/unit/inference/v2/model_implementations/sharding/test_attn_out_sharding.py index 850c4c24fde6..fb7901dbf938 100644 --- a/tests/unit/inference/v2/model_implementations/sharding/test_attn_out_sharding.py +++ 
b/tests/unit/inference/v2/model_implementations/sharding/test_attn_out_sharding.py @@ -8,6 +8,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.model_implementations.sharding import * +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') # None of the logic should be dependent on head size. HEAD_SIZE = 64 diff --git a/tests/unit/inference/v2/model_implementations/sharding/test_mlp_sharding.py b/tests/unit/inference/v2/model_implementations/sharding/test_mlp_sharding.py index aac7e5391d8f..553d604d30ee 100644 --- a/tests/unit/inference/v2/model_implementations/sharding/test_mlp_sharding.py +++ b/tests/unit/inference/v2/model_implementations/sharding/test_mlp_sharding.py @@ -8,6 +8,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.model_implementations.sharding import * +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def round_up_to_256(x: int) -> int: diff --git a/tests/unit/inference/v2/model_implementations/sharding/test_qkv_sharding.py b/tests/unit/inference/v2/model_implementations/sharding/test_qkv_sharding.py index 9a1cb9c09c64..86575d2176ad 100644 --- a/tests/unit/inference/v2/model_implementations/sharding/test_qkv_sharding.py +++ b/tests/unit/inference/v2/model_implementations/sharding/test_qkv_sharding.py @@ -10,6 +10,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.model_implementations.sharding import * +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def fill_with_head_ids(head_size: int, n_heads_q: int, n_heads_kv: Optional[int] = None) -> torch.Tensor: diff --git a/tests/unit/inference/v2/modules/test_blas_linear_module.py b/tests/unit/inference/v2/modules/test_blas_linear_module.py index f4d0b1991238..b50819875699 100644 --- a/tests/unit/inference/v2/modules/test_blas_linear_module.py +++ b/tests/unit/inference/v2/modules/test_blas_linear_module.py @@ -13,7 +13,10 @@ from deepspeed.inference.v2.modules import ConfigBundle from deepspeed.inference.v2.modules.configs import DSLinearConfig from deepspeed.inference.v2.modules.interfaces import DSLinearRegistry -from ...v2.inference_test_utils import allclose +from ...v2.inference_test_utils import allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_implementation(hidden_states: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor], diff --git a/tests/unit/inference/v2/modules/test_blocked_attn.py b/tests/unit/inference/v2/modules/test_blocked_attn.py index 6556aa460a44..b55909a69caf 100644 --- a/tests/unit/inference/v2/modules/test_blocked_attn.py +++ b/tests/unit/inference/v2/modules/test_blocked_attn.py @@ -16,7 +16,10 @@ from deepspeed.inference.v2.modules.interfaces import DSSelfAttentionRegistry, DSSelfAttentionBase from ..kernels.ragged_ops.ragged_testing_utils import build_batch_and_manager -from ...v2.inference_test_utils import allclose +from ...v2.inference_test_utils import allclose, skip_on_inference_v2 + +pytestmark = 
pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') try: from flash_attn.flash_attn_interface import flash_attn_varlen_func diff --git a/tests/unit/inference/v2/modules/test_cuda_pre_ln_module.py b/tests/unit/inference/v2/modules/test_cuda_pre_ln_module.py index 386f3b3ef0b3..dc971f83f0c2 100644 --- a/tests/unit/inference/v2/modules/test_cuda_pre_ln_module.py +++ b/tests/unit/inference/v2/modules/test_cuda_pre_ln_module.py @@ -12,7 +12,10 @@ from deepspeed.inference.v2.modules import ConfigBundle from deepspeed.inference.v2.modules.configs import DSNormConfig from deepspeed.inference.v2.modules.interfaces import DSPreNormRegistry -from ...v2.inference_test_utils import get_dtypes, allclose +from ...v2.inference_test_utils import get_dtypes, allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_implementation(residual: torch.Tensor, hidden_states: Optional[torch.Tensor], gamma: torch.Tensor, diff --git a/tests/unit/inference/v2/modules/test_custom_module.py b/tests/unit/inference/v2/modules/test_custom_module.py index eb54b7a913f2..b813b715ec1e 100644 --- a/tests/unit/inference/v2/modules/test_custom_module.py +++ b/tests/unit/inference/v2/modules/test_custom_module.py @@ -11,7 +11,10 @@ from deepspeed.inference.v2.modules.interfaces import DSPostNormRegistry from deepspeed.inference.v2.modules.configs import DSNormConfig from deepspeed.inference.v2.modules.implementations import cuda_post_ln -from ...v2.inference_test_utils import allclose +from ...v2.inference_test_utils import allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_implementation(residual: torch.Tensor, hidden_states: torch.Tensor, gamma: torch.Tensor, diff --git a/tests/unit/inference/v2/modules/test_cutlass_moe.py b/tests/unit/inference/v2/modules/test_cutlass_moe.py index b14ba127c6be..27ba70b88728 100644 --- a/tests/unit/inference/v2/modules/test_cutlass_moe.py +++ b/tests/unit/inference/v2/modules/test_cutlass_moe.py @@ -15,7 +15,10 @@ from deepspeed.inference.v2.modules.interfaces import DSMoERegistry from ..kernels.ragged_ops.ragged_testing_utils import build_simple_batch -from ...v2.inference_test_utils import allclose, get_dtypes +from ...v2.inference_test_utils import allclose, get_dtypes, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def _gating_reference(logits: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: diff --git a/tests/unit/inference/v2/modules/test_post_ln_module.py b/tests/unit/inference/v2/modules/test_post_ln_module.py index f9dcfd272170..538eb32c3c85 100644 --- a/tests/unit/inference/v2/modules/test_post_ln_module.py +++ b/tests/unit/inference/v2/modules/test_post_ln_module.py @@ -10,7 +10,10 @@ from deepspeed.inference.v2.modules import ConfigBundle from deepspeed.inference.v2.modules.configs import DSNormConfig from deepspeed.inference.v2.modules.interfaces import DSPostNormRegistry -from ...v2.inference_test_utils import get_dtypes, allclose +from ...v2.inference_test_utils import get_dtypes, allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def 
reference_implementation(residual: torch.Tensor, hidden_states: torch.Tensor, gamma: torch.Tensor, diff --git a/tests/unit/inference/v2/modules/test_pre_rms_module.py b/tests/unit/inference/v2/modules/test_pre_rms_module.py index bbd108a35a5a..58bf7761bafa 100644 --- a/tests/unit/inference/v2/modules/test_pre_rms_module.py +++ b/tests/unit/inference/v2/modules/test_pre_rms_module.py @@ -12,7 +12,10 @@ from deepspeed.inference.v2.modules import ConfigBundle from deepspeed.inference.v2.modules.configs import DSNormConfig from deepspeed.inference.v2.modules.interfaces import DSPreNormRegistry -from ...v2.inference_test_utils import get_dtypes, allclose +from ...v2.inference_test_utils import get_dtypes, allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_implementation(residual: torch.Tensor, hidden_states: Optional[torch.Tensor], gamma: torch.Tensor, diff --git a/tests/unit/inference/v2/ragged/test_blocked_allocator.py b/tests/unit/inference/v2/ragged/test_blocked_allocator.py index 4596e81c5652..6dddeff6ee9f 100644 --- a/tests/unit/inference/v2/ragged/test_blocked_allocator.py +++ b/tests/unit/inference/v2/ragged/test_blocked_allocator.py @@ -9,7 +9,12 @@ import pytest import torch +from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.ragged.blocked_allocator import BlockedAllocator +from ...v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') @pytest.mark.inference_v2 diff --git a/tests/unit/inference/v2/ragged/test_manager_configs.py b/tests/unit/inference/v2/ragged/test_manager_configs.py index a5f270cced8c..c2dec7673308 100644 --- a/tests/unit/inference/v2/ragged/test_manager_configs.py +++ b/tests/unit/inference/v2/ragged/test_manager_configs.py @@ -7,7 +7,12 @@ from deepspeed.pydantic_v1 import ValidationError +from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.ragged import DSStateManagerConfig +from ...v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') @pytest.mark.inference_v2 diff --git a/tests/unit/inference/v2/ragged/test_ragged_wrapper.py b/tests/unit/inference/v2/ragged/test_ragged_wrapper.py index 3cb74f4c49d2..669368f9ef9d 100644 --- a/tests/unit/inference/v2/ragged/test_ragged_wrapper.py +++ b/tests/unit/inference/v2/ragged/test_ragged_wrapper.py @@ -14,6 +14,10 @@ RaggedBatchWrapper, DSStateManagerConfig, ) +from ...v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') @pytest.mark.inference_v2 diff --git a/tests/unit/linear/test_ctx.py b/tests/unit/linear/test_ctx.py new file mode 100644 index 000000000000..e03d13fd6ce2 --- /dev/null +++ b/tests/unit/linear/test_ctx.py @@ -0,0 +1,106 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import deepspeed +import pytest +from unit.common import DistributedTest + +import deepspeed.comm as dist +from deepspeed.linear import LoRAConfig, init_lora +from deepspeed.linear.optimized_linear import LoRAOptimizedLinear +from unit.simple_model import random_dataloader, SimpleModel + +try: + import transformers +except ImportError: + transformers = None + +if transformers is None: + pytest.skip("transformers is required for this test", allow_module_level=True) + + +def injection_assert(model): + # pick out random linear that should have been replaced and initialized + q_proj = model.model.layers[1].self_attn.q_proj + + assert isinstance(q_proj, LoRAOptimizedLinear), "injection did not happen" + assert q_proj._initialized, "lora was not initialized properly" + assert isinstance(q_proj.lora_weight_1, torch.nn.Linear) + assert isinstance(q_proj.lora_weight_2, torch.nn.Linear) + + +class TestEngine(DistributedTest): + world_size = 2 + + def test_model(self): + lora_config = LoRAConfig(lora_r=16, lora_alpha=16, base_weight_sharding=2) + quant_config = None + hidden_dim = 64 + nlayers = 4 + + with deepspeed.linear.Init(lora_config=lora_config, quant_config=quant_config): + model = SimpleModel(hidden_dim=hidden_dim, nlayers=nlayers) + + init_lora(model) + + model_norms = [model.linears[i].weight.norm().item() for i in range(nlayers)] + + ds_config = { + "train_batch_size": 2, + "steps_per_print": 1, + "bf16": { + "enabled": True + }, + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.00015 + } + }, + "zero_optimization": { + "stage": 1 + } + } + model, *_ = deepspeed.initialize(config=ds_config, model=model, model_parameters=model.parameters()) + + engine_norms = [model.module.linears[i].weight.norm().item() for i in range(nlayers)] + + # Ensure that sharded weights are not broadcast during engine init + assert engine_norms == model_norms, f"{dist.get_rank()=} base weight norms are not the same after engine init, {engine_norms=} != {model_norms=}" + + data_loader = random_dataloader(model=model, + total_samples=50, + hidden_dim=hidden_dim, + device=model.device, + dtype=torch.bfloat16) + for n, batch in enumerate(data_loader): + loss = model(batch[0], batch[1]) + model.backward(loss) + model.step() + + +class TestInitTransformers(DistributedTest): + world_size = 2 + + def test_pretrained_init(self): + lora_config = LoRAConfig(lora_r=16, lora_alpha=16, base_weight_sharding=2) + quant_config = None + + with deepspeed.linear.Init(lora_config=lora_config, quant_config=quant_config): + model = transformers.AutoModelForCausalLM.from_pretrained("llamafactory/tiny-random-Llama-3") + + injection_assert(model) + + def test_config_init(self): + lora_config = LoRAConfig(lora_r=16, lora_alpha=16, base_weight_sharding=2) + quant_config = None + + config = transformers.AutoConfig.from_pretrained("llamafactory/tiny-random-Llama-3") + + with deepspeed.linear.Init(lora_config=lora_config, quant_config=quant_config): + model = transformers.AutoModelForCausalLM.from_config(config) + + injection_assert(model) diff --git a/tests/unit/linear/test_linear.py b/tests/unit/linear/test_linear.py index ccd26b4cd726..3f6813d5d92b 100644 --- a/tests/unit/linear/test_linear.py +++ b/tests/unit/linear/test_linear.py @@ -13,6 +13,8 @@ from unit.common import DistributedTest from deepspeed.ops.op_builder import FPQuantizerBuilder +# TODO: [SW-208941] clear gaudi specific code. 
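+# NOTE: get_hpu_dev_version (imported below from tests.unit.util) is used later in this file to detect Gaudi2 and switch the fp8 range dtype accordingly.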
+from tests.unit.util import get_hpu_dev_version if not deepspeed.ops.__compatible_ops__[FPQuantizerBuilder.NAME]: pytest.skip("FPQuantizer op is not available on this system", allow_module_level=True) @@ -46,7 +48,6 @@ class TestLoRALinear(DistributedTest): def test(self, base_weight_sharding): rank = dist.get_rank() - lora_config = None quantization_config = None input_features = 64 # Number of input features @@ -77,15 +78,17 @@ class TestQuantLinear(DistributedTest): world_size = 2 def test(self, q_bits): - rank = dist.get_rank() - lora_config = None - input_features = 64 # Number of input features output_features = 64 # Number of output features batch_size = 5 # Number of samples in a batch lora_config = None quantization_config = QuantizationConfig(q_bits=q_bits) + quantization_config.q_range_dtype = torch.float8_e4m3fn + quantization_config.q_dtype = torch.float8_e4m3fn + # TODO: [SW-208941] clear gaudi specific code. + if get_hpu_dev_version().lower() == 'gaudi2': + quantization_config.q_range_dtype = torch.float8_e4m3fnuz linear_layer = OptimizedLinear(input_dim=input_features, output_dim=output_features, @@ -106,15 +109,17 @@ class TestOptimizedLinear(DistributedTest): world_size = 2 def test(self, base_weight_sharding, q_bits): - rank = dist.get_rank() - lora_config = None - input_features = 64 # Number of input features output_features = 64 # Number of output features batch_size = 5 # Number of samples in a batch lora_config = LoRAConfig(lora_r=16, lora_alpha=16, base_weight_sharding=base_weight_sharding) quantization_config = QuantizationConfig(q_bits=q_bits) + quantization_config.q_range_dtype = torch.float8_e4m3fn + quantization_config.q_dtype = torch.float8_e4m3fn + # TODO: [SW-208941] clear gaudi specific code. + if get_hpu_dev_version().lower() == 'gaudi2': + quantization_config.q_range_dtype = torch.float8_e4m3fnuz linear_layer = OptimizedLinear(input_dim=input_features, output_dim=output_features, diff --git a/tests/unit/linear/test_quant_param.py b/tests/unit/linear/test_quant_param.py index 9479b3cba8a0..115b5b026703 100644 --- a/tests/unit/linear/test_quant_param.py +++ b/tests/unit/linear/test_quant_param.py @@ -12,6 +12,8 @@ from deepspeed.linear.config import QuantizationConfig from deepspeed.ops.op_builder import FPQuantizerBuilder +# TODO: [SW-208941] clear gaudi specific code. +from tests.unit.util import get_hpu_dev_version from unit.common import DistributedTest @@ -38,11 +40,17 @@ def test_requires_grad(self): def test_move_to_accelerator(self): device = get_accelerator().current_device() data = torch.rand(5, 5, device='cpu', dtype=torch.bfloat16) - qp = QuantizedParameter(data) + quantization_config = QuantizationConfig() + quantization_config.q_range_dtype = torch.float8_e4m3fn + quantization_config.q_dtype = torch.float8_e4m3fn + # TODO: [SW-208941] clear gaudi specific code. 
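+ # torch.float8_e4m3fnuz (selected for Gaudi2 below) has a maximum representable value of 240 versus 448 for torch.float8_e4m3fn, so the quantization range dtype must follow the device.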
+ if get_hpu_dev_version().lower() == 'gaudi2': + quantization_config.q_range_dtype = torch.float8_e4m3fnuz + qp = QuantizedParameter(data, quantization_config=quantization_config) assert qp.device == torch.device('cpu') qp = qp.to(get_accelerator().current_device_name()) assert qp.device == torch.device(device) - assert qp.dtype == torch.int8 + assert qp.dtype == torch.float8_e4m3fn def test_hf_clone(self): device = get_accelerator().current_device_name() diff --git a/tests/unit/moe/test_moe.py b/tests/unit/moe/test_moe.py index fdff9430a4e6..dca765b00028 100644 --- a/tests/unit/moe/test_moe.py +++ b/tests/unit/moe/test_moe.py @@ -77,6 +77,8 @@ def test(self, ep_size, zero_stage, use_residual): # E+D -- ep_size = 2 # E only -- ep_size = 4 model = SimpleMoEModel(hidden_dim, ep_size=ep_size, use_residual=use_residual) + #TODO SW-179530: remove workaround when issue with lazy mode is resolved (see SW-179530). + model.to(get_accelerator().device_name()) param_group = {'params': [p for p in model.parameters()], 'name': 'random-unique-name'} params = split_params_into_different_moe_groups_for_optimizer(param_group) optimizer = torch.optim.AdamW(params=params) @@ -92,7 +94,8 @@ def strict_average_tensor(tensor): process_group = optimizer.dp_process_group curr_size = 0 pg_offsets = [] - for i, param, param_id in optimizer.params_in_ipg_bucket: + for i, param_idx, param_id in optimizer.params_in_ipg_bucket: + param = optimizer.bit16_groups[i][param_idx] process_group = optimizer.dp_process_group if optimizer.ipg_bucket_has_moe_params: process_group = optimizer.expert_dp_process_group[param.group_name] if is_moe_param( diff --git a/tests/unit/ops/adam/test_adamw.py b/tests/unit/ops/adam/test_adamw.py index 3b1b088766a5..39f67da55d93 100644 --- a/tests/unit/ops/adam/test_adamw.py +++ b/tests/unit/ops/adam/test_adamw.py @@ -12,6 +12,7 @@ from unit.common import DistributedTest from unit.simple_model import SimpleModel from deepspeed.accelerator import get_accelerator +from deepspeed.ops.op_builder import FusedAdamBuilder if torch.half not in get_accelerator().supported_dtypes(): pytest.skip(f"fp16 not supported, valid dtype: {get_accelerator().supported_dtypes()}", allow_module_level=True) @@ -67,6 +68,9 @@ def test(self, "cpu_offload": zero_offload } } + if (resulting_optimizer[0] == FusedAdam) and (not deepspeed.ops.__compatible_ops__[FusedAdamBuilder.NAME]): + pytest.skip("FusedAdam is not compatible") + model = SimpleModel(10) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, diff --git a/tests/unit/ops/aio/test_aio.py b/tests/unit/ops/aio/test_aio.py old mode 100644 new mode 100755 diff --git a/tests/unit/ops/fp_quantizer/test_fp8_gemm.py b/tests/unit/ops/fp_quantizer/test_fp8_gemm.py new file mode 100644 index 000000000000..d26640b0d547 --- /dev/null +++ b/tests/unit/ops/fp_quantizer/test_fp8_gemm.py @@ -0,0 +1,57 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import pytest +import torch + +from deepspeed import get_accelerator +from deepspeed.linear import QuantizationConfig + +# TODO: [SW-208941] clear gaudi specific code. 
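+# The test below quantizes a bf16 weight to fp8 with FP_Quantize, multiplies it against a bf16 activation via matmul_fp8, and compares the result with dequantize followed by torch.matmul.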
+from tests.unit.util import get_hpu_dev_version + +# [SW-209231] Enable gp8_gemm test +pytest.skip("fp8 gemm (fp8 weight, float 16 input) is currently unimplemented", allow_module_level=True) + +from deepspeed.ops.fp_quantizer import FP_Quantize, matmul_fp8 + + +@pytest.mark.parametrize("dtype", [torch.bfloat16], ids=["bf16"]) +@pytest.mark.parametrize("q_bits", [8], ids=[ + "qbits8", +]) +@pytest.mark.parametrize("M", [1, 2, 4, 8, 32, 64, 128, 256, 512, 1024, 2048]) +def test_fp_quant(dtype, q_bits, M): + device_name = get_accelerator().device_name() + quantization_group_size = 128 + + quant_config = QuantizationConfig() + quant_config.q_range_dtype = torch.float8_e4m3fn + quant_config.q_dtype = torch.float8_e4m3fn + # TODO: [SW-208941] clear gaudi specific code. + if get_hpu_dev_version().lower() == 'gaudi2': + quant_config.q_range_dtype = torch.float8_e4m3fnuz + quant_config.group_size = quantization_group_size + fpq = FP_Quantize(quantization_config=quant_config) + + N = 8192 + H = 4096 + + x = torch.randn(M, H, dtype=dtype, device=device_name) + weight_bf16 = torch.randn(H, N, dtype=dtype, device=device_name) + + weight, _ = fpq.quantize(weight_bf16.data, q_bits=q_bits, return_meta_tensor=True) + scale = fpq.get_scales() + out = matmul_fp8( + x, + weight, + scale, + quantization_group_size, + ) + + out_q = torch.matmul(x, fpq.dequantize(weight, scale=fpq.scale)) + + error = ((out - out_q).abs() / (out.abs() + 1e-5)).sum() / out.numel() + assert 0.004 > error, f"failed on batch-size {M} with error {error}" diff --git a/tests/unit/ops/fp_quantizer/test_fp_quant.py b/tests/unit/ops/fp_quantizer/test_fp_quant.py index bed8bd7e3bcc..354178277f48 100644 --- a/tests/unit/ops/fp_quantizer/test_fp_quant.py +++ b/tests/unit/ops/fp_quantizer/test_fp_quant.py @@ -5,10 +5,15 @@ import pytest import torch +from deepspeed.linear import QuantizationConfig + import deepspeed from deepspeed.ops.fp_quantizer import FP_Quantize from deepspeed.ops.op_builder import FPQuantizerBuilder +from deepspeed.accelerator import get_accelerator +# TODO: [SW-208941] clear gaudi specific code. +from tests.unit.util import get_hpu_dev_version if not deepspeed.ops.__compatible_ops__[FPQuantizerBuilder.NAME]: pytest.skip("FPQuantizer op is not available on this system", allow_module_level=True) @@ -17,7 +22,7 @@ from qtorch.quant import float_quantize -def qtorch_quantize(input, exp_bits=4, man_bits=3, rounding="nearest", group_size=1024): +def qtorch_quantize(input, exp_bits=4, man_bits=3, rounding="nearest", group_size=1024, quant_config=None): ori_dt = input.dtype ori_shape = input.shape last_dim = group_size @@ -26,7 +31,7 @@ def qtorch_quantize(input, exp_bits=4, man_bits=3, rounding="nearest", group_siz q_bits = exp_bits + man_bits + 1 input_to_float = input.float() if q_bits == 8: - q_range = 480. + q_range = torch.finfo(quant_config.q_range_dtype).max elif q_bits == 6: q_range = 28. elif q_bits == 12: @@ -41,20 +46,33 @@ def qtorch_quantize(input, exp_bits=4, man_bits=3, rounding="nearest", group_siz @pytest.mark.parametrize("dtype", [torch.bfloat16], ids=["bf16"]) def test_fp_quant_meta(dtype): + device_name = get_accelerator().device_name() group_size = 128 q_bits = 8 exp_bits = 4 man_bits = 3 - fpq = FP_Quantize(group_size=group_size) + quant_config = QuantizationConfig() + quant_config.q_range_dtype = torch.float8_e4m3fn + quant_config.q_dtype = torch.float8_e4m3fn + # TODO: [SW-208941] clear gaudi specific code. 
+ if get_hpu_dev_version().lower() == 'gaudi2': + quant_config.q_range_dtype = torch.float8_e4m3fnuz + quant_config.group_size = group_size + fpq = FP_Quantize(quantization_config=quant_config) + for i in range(10): - x = torch.rand(4, 1024, dtype=dtype, device='cuda') + x = torch.rand(4, 1024, dtype=dtype) - ds_x = x.clone() + ds_x = x.clone().to(device_name) x_quantized, meta_tensor = fpq.quantize(ds_x, q_bits=q_bits, return_meta_tensor=True) x_dequantized = fpq.dequantize(x_quantized, q_bits=q_bits, scale=meta_tensor) - qtorch_out = qtorch_quantize(x, exp_bits=exp_bits, man_bits=man_bits, group_size=group_size) + qtorch_out = qtorch_quantize(x, + exp_bits=exp_bits, + man_bits=man_bits, + group_size=group_size, + quant_config=quant_config) qtorch_error = (qtorch_out - x).abs().sum() / x.numel() ds_error = (x_dequantized - x).abs().sum() / x.numel() @@ -68,12 +86,22 @@ def test_fp_quant_selective(dtype): exp_bits = 4 man_bits = 3 - fpq = FP_Quantize(group_size=group_size) - indexes = torch.zeros(2, dtype=torch.int32, device='cuda') + device_name = get_accelerator().device_name() + + quant_config = QuantizationConfig() + quant_config.q_range_dtype = torch.float8_e4m3fn + quant_config.q_dtype = torch.float8_e4m3fn + # TODO: [SW-208941] clear gaudi specific code. + if get_hpu_dev_version().lower() == 'gaudi2': + quant_config.q_range_dtype = torch.float8_e4m3fnuz + quant_config.group_size = group_size + fpq = FP_Quantize(quantization_config=quant_config) + + indexes = torch.zeros(2, dtype=torch.int32, device=device_name) indexes[0] = 1 indexes[1] = 3 for i in range(10): - x = torch.rand(4, 1024, dtype=dtype, device='cuda') + x = torch.rand(4, 1024, dtype=dtype, device=device_name) x = x.reshape(4, 1, x.shape[-1]) ds_x = x.clone() @@ -93,13 +121,21 @@ def test_fp_quant_selective(dtype): @pytest.mark.parametrize("dtype", [torch.bfloat16], ids=["bf16"]) @pytest.mark.parametrize("q_bits", [8, 6, 12], ids=["qbits8", "qbits6", "qbits12"]) def test_fp_quant(dtype, q_bits): - group_size = 128 - fpq = FP_Quantize(group_size=group_size) + device_name = get_accelerator().device_name() + + quant_config = QuantizationConfig() + quant_config.q_range_dtype = torch.float8_e4m3fn + quant_config.q_dtype = torch.float8_e4m3fn + # TODO: [SW-208941] clear gaudi specific code. 
+ if get_hpu_dev_version().lower() == 'gaudi2': + quant_config.q_range_dtype = torch.float8_e4m3fnuz + quant_config.group_size = 128 + fpq = FP_Quantize(quantization_config=quant_config) for i in range(10): - x = torch.rand(4, 1024, dtype=dtype, device='cuda') + x = torch.rand(4, 1024, dtype=dtype) - ds_x = x.clone() + ds_x = x.clone().to(device_name) x_quantized = fpq.quantize(ds_x, q_bits=q_bits) x_dequantized = fpq.dequantize(x_quantized, q_bits=q_bits) @@ -115,7 +151,11 @@ def test_fp_quant(dtype, q_bits): else: raise ValueError(f"unknown {q_bits=}") - qtorch_out = qtorch_quantize(x, exp_bits=exp_bits, man_bits=man_bits, group_size=group_size) + qtorch_out = qtorch_quantize(x, + exp_bits=exp_bits, + man_bits=man_bits, + group_size=quant_config.group_size, + quant_config=quant_config) qtorch_error = (qtorch_out - x).abs().sum() / x.numel() ds_error = (x_dequantized - x).abs().sum() / x.numel() diff --git a/tests/unit/ops/transformer/inference/inference_test_utils.py b/tests/unit/ops/transformer/inference/inference_test_utils.py index 9c7b428c0e68..9cfcae809f09 100644 --- a/tests/unit/ops/transformer/inference/inference_test_utils.py +++ b/tests/unit/ops/transformer/inference/inference_test_utils.py @@ -26,12 +26,7 @@ def get_tolerances(): def get_dtypes(): global DTYPES if DTYPES is None: - DTYPES = [torch.float16, torch.float32] - try: - if get_accelerator().is_bf16_supported(): - DTYPES.append(torch.bfloat16) - except (AssertionError, AttributeError): - pass + DTYPES = get_accelerator().supported_dtypes() return DTYPES diff --git a/tests/unit/ops/transformer/inference/test_attention.py b/tests/unit/ops/transformer/inference/test_attention.py index 0521245bcdf3..ecf681542ff6 100644 --- a/tests/unit/ops/transformer/inference/test_attention.py +++ b/tests/unit/ops/transformer/inference/test_attention.py @@ -27,8 +27,8 @@ def ref_torch_attention(q, k, v, mask, sm_scale): @pytest.mark.parametrize("causal", [True, False]) @pytest.mark.parametrize("use_flash", [True, False]) def test_attention(BATCH, H, N_CTX, D_HEAD, causal, use_flash, dtype=torch.float16): - if not deepspeed.HAS_TRITON: - pytest.skip("triton has to be installed for the test") + if not deepspeed.get_accelerator().is_triton_supported(): + pytest.skip("triton is not supported on this system") minus_inf = -65504.0 dev = deepspeed.accelerator.get_accelerator().device_name() diff --git a/tests/unit/ops/transformer/inference/test_bias_add.py b/tests/unit/ops/transformer/inference/test_bias_add.py index 843c9b889c2b..f25bbc1be692 100644 --- a/tests/unit/ops/transformer/inference/test_bias_add.py +++ b/tests/unit/ops/transformer/inference/test_bias_add.py @@ -8,12 +8,13 @@ import deepspeed from deepspeed.accelerator import get_accelerator from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.ops.transformer import DeepSpeedInferenceConfig +from deepspeed.ops.transformer.inference.op_binding.bias_add import BiasAddOp from .inference_test_utils import allclose, get_dtypes if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None torch_minor_version = None @@ -22,15 +23,8 @@ def run_bias_add_reference(activations, bias): def run_bias_add_ds(activations, bias): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - if activations.dtype == torch.float16: - return inference_module.bias_add_fp16(activations, bias) - elif activations.dtype == torch.bfloat16: 
- return inference_module.bias_add_bf16(activations, bias) - else: - return inference_module.bias_add_fp32(activations, bias) + config = DeepSpeedInferenceConfig(dtype=activations.dtype) + return BiasAddOp(config)(activations, bias) @pytest.mark.inference_ops diff --git a/tests/unit/ops/transformer/inference/test_bias_geglu.py b/tests/unit/ops/transformer/inference/test_bias_geglu.py index d5ab13964974..05de4fbb4cf8 100644 --- a/tests/unit/ops/transformer/inference/test_bias_geglu.py +++ b/tests/unit/ops/transformer/inference/test_bias_geglu.py @@ -8,13 +8,13 @@ import deepspeed from deepspeed.ops.op_builder import InferenceBuilder from deepspeed.accelerator import get_accelerator +from deepspeed.ops.transformer.inference.op_binding.gated_activation import GatedActivationOp from deepspeed.utils.types import ActivationFuncType from .inference_test_utils import allclose, get_dtypes if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None torch_minor_version = None @@ -27,10 +27,7 @@ def run_bias_geglu_reference(activations, bias): def run_bias_geglu_ds(activation, bias): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - return inference_module.gated_activation(activation, bias, ActivationFuncType.GATED_GELU) + return GatedActivationOp()(activation, bias, ActivationFuncType.GATED_GELU) @pytest.mark.inference_ops @@ -56,17 +53,14 @@ def run_gated_silu_reference(activations, bias): def run_gated_silu_ds(activation, bias): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - return inference_module.gated_activation(activation, bias, ActivationFuncType.GATED_SILU) + return GatedActivationOp()(activation, bias, ActivationFuncType.GATED_SILU) @pytest.mark.inference_ops @pytest.mark.parametrize("batch", [1, 2]) @pytest.mark.parametrize("sequence", [1, 128, 255]) @pytest.mark.parametrize("channels", [512, 1232, 4096]) -@pytest.mark.parametrize("dtype", [torch.float16, torch.float32]) +@pytest.mark.parametrize("dtype", get_dtypes()) def test_gated_silu(batch, sequence, channels, dtype): activation = torch.randn((batch, sequence, channels * 2), dtype=dtype, device=get_accelerator().device_name()) bias = torch.randn((channels * 2), dtype=dtype, device=get_accelerator().device_name()) diff --git a/tests/unit/ops/transformer/inference/test_bias_gelu.py b/tests/unit/ops/transformer/inference/test_bias_gelu.py index fd82da51380c..b69030e87ace 100644 --- a/tests/unit/ops/transformer/inference/test_bias_gelu.py +++ b/tests/unit/ops/transformer/inference/test_bias_gelu.py @@ -8,13 +8,14 @@ import deepspeed from deepspeed.accelerator import get_accelerator from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.ops.transformer import DeepSpeedInferenceConfig +from deepspeed.ops.transformer.inference.op_binding.bias_gelu import BiasGeluOp from .inference_test_utils import allclose, get_dtypes from packaging import version as pkg_version if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None torch_minor_version = None @@ -25,15 +26,8 @@ def run_bias_gelu_reference(activations, bias): def run_bias_gelu_ds(activations, bias): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - if activations.dtype == 
torch.float16: - return inference_module.bias_gelu_fp16(activations, bias) - elif activations.dtype == torch.bfloat16: - return inference_module.bias_gelu_bf16(activations, bias) - else: - return inference_module.bias_gelu_fp32(activations, bias) + config = DeepSpeedInferenceConfig(dtype=activations.dtype) + return BiasGeluOp(config)(activations, bias) @pytest.mark.inference_ops diff --git a/tests/unit/ops/transformer/inference/test_bias_relu.py b/tests/unit/ops/transformer/inference/test_bias_relu.py index 881af78e92cf..57134665b241 100644 --- a/tests/unit/ops/transformer/inference/test_bias_relu.py +++ b/tests/unit/ops/transformer/inference/test_bias_relu.py @@ -8,12 +8,13 @@ import deepspeed from deepspeed.accelerator import get_accelerator from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.ops.transformer import DeepSpeedInferenceConfig +from deepspeed.ops.transformer.inference.op_binding.bias_relu import BiasReluOp from .inference_test_utils import allclose, get_dtypes if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None torch_minor_version = None @@ -23,15 +24,8 @@ def run_bias_relu_reference(activations, bias): def run_bias_relu_ds(activations, bias): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - if activations.dtype == torch.float16: - return inference_module.bias_relu_fp16(activations, bias) - elif activations.dtype == torch.bfloat16: - return inference_module.bias_relu_bf16(activations, bias) - else: - return inference_module.bias_relu_fp32(activations, bias) + config = DeepSpeedInferenceConfig(dtype=activations.dtype) + return BiasReluOp(config)(activations, bias) @pytest.mark.inference_ops diff --git a/tests/unit/ops/transformer/inference/test_gelu.py b/tests/unit/ops/transformer/inference/test_gelu.py index 675860b00bdb..5f820ef3b579 100644 --- a/tests/unit/ops/transformer/inference/test_gelu.py +++ b/tests/unit/ops/transformer/inference/test_gelu.py @@ -7,11 +7,12 @@ import torch import deepspeed from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.ops.transformer import DeepSpeedInferenceConfig +from deepspeed.ops.transformer.inference.op_binding.bias_gelu import BiasGeluOp if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None torch_minor_version = None @@ -45,13 +46,8 @@ def run_gelu_ds(activations, use_triton_ops=False): device = deepspeed.accelerator.get_accelerator().device_name() channels = activations.shape[-1] bias = torch.zeros((channels), dtype=activations.dtype, device=device) - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - if activations.dtype == torch.float16: - return inference_module.bias_gelu_fp16(activations, bias) - else: - return inference_module.bias_gelu_fp32(activations, bias) + config = DeepSpeedInferenceConfig(dtype=activations.dtype) + return BiasGeluOp(config)(activations, bias) @pytest.mark.inference_ops @@ -65,8 +61,8 @@ def test_gelu(batch, sequence, channels, dtype, use_triton_ops): activations_ds = torch.randn((batch, sequence, channels), dtype=dtype, device=device) activations_ref = activations_ds.clone().detach() - if not deepspeed.HAS_TRITON and use_triton_ops: - pytest.skip("triton has to be installed for the test") + if not 
deepspeed.get_accelerator().is_triton_supported(): + pytest.skip("triton is not supported on this system") ds_out = run_gelu_ds(activations_ds, use_triton_ops) ref_out = run_gelu_reference(activations_ref) assert (allclose(ds_out, ref_out)) diff --git a/tests/unit/ops/transformer/inference/test_layer_norm.py b/tests/unit/ops/transformer/inference/test_layer_norm.py index 9eac612aa29c..7711daf0d887 100644 --- a/tests/unit/ops/transformer/inference/test_layer_norm.py +++ b/tests/unit/ops/transformer/inference/test_layer_norm.py @@ -8,6 +8,7 @@ import pytest from deepspeed.accelerator import get_accelerator from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.ops.transformer.inference.op_binding.layer_norm import LayerNormOp from .inference_test_utils import allclose, get_dtypes, assert_almost_equal try: import triton # noqa: F401 # type: ignore @@ -21,8 +22,6 @@ if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None - def ref_implementation(vals, gamma, beta, epsilon, channels, dtype): vals_f = vals.to(torch.float32) @@ -32,10 +31,7 @@ def ref_implementation(vals, gamma, beta, epsilon, channels, dtype): def ds_implementation(vals, gamma, beta, epsilon): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - return inference_module.layer_norm(vals, gamma, beta, epsilon) + return LayerNormOp()(vals, gamma, beta, epsilon) def ds_triton_implementation(vals, gamma, beta, epsilon): @@ -49,8 +45,8 @@ def ds_triton_implementation(vals, gamma, beta, epsilon): @pytest.mark.parametrize("dtype", get_dtypes()) @pytest.mark.parametrize("use_triton_ops", [False, True]) def test_layer_norm(batch, seq_len, channels, dtype, use_triton_ops): - if not deepspeed.HAS_TRITON and use_triton_ops: - pytest.skip("triton has to be installed for the test") + if not deepspeed.get_accelerator().is_triton_supported(): + pytest.skip("triton is not supported on this system") vals = torch.randn((batch, seq_len, channels), dtype=dtype, device=get_accelerator().current_device_name()) gamma = torch.randn((channels), dtype=dtype, device=get_accelerator().current_device_name()) @@ -83,10 +79,7 @@ def residual_ref_implementation(vals, bias, res, gamma, beta, epsilon, channels, def residual_ds_implementation(vals, bias, res, gamma, beta, epsilon): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - return inference_module._layer_norm_residual(vals, bias, res, gamma, beta, epsilon) + return LayerNormOp.layer_norm_residual(vals, bias, res, gamma, beta, epsilon) def residual_ds_triton_implementation(vals, bias, res, gamma, beta, epsilon): @@ -100,8 +93,8 @@ def residual_ds_triton_implementation(vals, bias, res, gamma, beta, epsilon): @pytest.mark.parametrize("dtype", get_dtypes()) @pytest.mark.parametrize("use_triton_ops", [False, True]) def test_layer_norm_residual(batch, seq_len, channels, dtype, use_triton_ops): - if not deepspeed.HAS_TRITON and use_triton_ops: - pytest.skip("triton has to be installed for the test") + if not deepspeed.get_accelerator().is_triton_supported(): + pytest.skip("triton is not supported on this system") vals = torch.randn((batch, seq_len, channels), dtype=dtype, device=get_accelerator().current_device_name()) residual = torch.randn((batch, seq_len, channels), dtype=dtype, device=get_accelerator().current_device_name()) @@ -137,10 +130,7 @@ def 
residual_store_ref_implementation(vals, bias, res, gamma, beta, epsilon, cha def residual_store_ds_implementation(vals, bias, res, gamma, beta, epsilon): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - return inference_module.layer_norm_residual_store_pre_ln_res(vals, bias, res, gamma, beta, epsilon) + return LayerNormOp.layer_norm_residual_store_pre_ln_res(vals, bias, res, gamma, beta, epsilon) @pytest.mark.inference_ops @@ -173,8 +163,8 @@ def test_layer_norm_residual_store_pre_ln_res(batch, seq_len, channels, dtype): @pytest.mark.parametrize("residual", [True, False]) @pytest.mark.parametrize("input_bias", [True, False]) def test_triton_layer_norm(M, N, dtype, residual, input_bias, eps=1e-5, device='cuda'): - if not deepspeed.HAS_TRITON: - pytest.skip("triton has to be installed for the test") + if not deepspeed.get_accelerator().is_triton_supported(): + pytest.skip("triton is not supported on this system") dev = get_accelerator().device_name() torch.manual_seed(0) # create data diff --git a/tests/unit/ops/transformer/inference/test_matmul.py b/tests/unit/ops/transformer/inference/test_matmul.py index 804a85750a3a..559aa2c60afe 100644 --- a/tests/unit/ops/transformer/inference/test_matmul.py +++ b/tests/unit/ops/transformer/inference/test_matmul.py @@ -42,8 +42,8 @@ def run_matmul_ds(a, b, use_triton_ops=False): @pytest.mark.parametrize("dtype", [torch.float16]) @pytest.mark.parametrize("use_triton_ops", [True]) def test_matmul_4d(B, H, M, K, N, dtype, use_triton_ops): - if not deepspeed.HAS_TRITON and use_triton_ops: - pytest.skip("triton has to be installed for the test") + if not deepspeed.get_accelerator().is_triton_supported(): + pytest.skip("triton is not supported on this system") # skip autotune in testing from deepspeed.ops.transformer.inference.triton.matmul_ext import fp16_matmul diff --git a/tests/unit/ops/transformer/inference/test_moe_res_matmult.py b/tests/unit/ops/transformer/inference/test_moe_res_matmult.py index e1c8127a83ac..dcf9f16baaf1 100644 --- a/tests/unit/ops/transformer/inference/test_moe_res_matmult.py +++ b/tests/unit/ops/transformer/inference/test_moe_res_matmult.py @@ -8,24 +8,20 @@ import deepspeed from deepspeed.accelerator import get_accelerator from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.ops.transformer.inference.op_binding.moe_res_matmul import MoEResMatmulOp from .inference_test_utils import allclose, get_dtypes if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None - def run_moe_res_matmul_reference(residual, coef1, coef2, output): return residual * coef1 + output * coef2 def run_moe_res_matmul_ds(residual, coef, output): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() coef_t = coef.transpose(-1, -2).contiguous() - return inference_module.moe_res_matmul(residual, coef_t, output) + return MoEResMatmulOp()(residual, coef_t, output) @pytest.mark.inference_ops diff --git a/tests/unit/ops/transformer/inference/test_residual_add.py b/tests/unit/ops/transformer/inference/test_residual_add.py index 91830e25fc81..cab64d1d0555 100644 --- a/tests/unit/ops/transformer/inference/test_residual_add.py +++ b/tests/unit/ops/transformer/inference/test_residual_add.py @@ -8,6 +8,8 @@ import deepspeed from deepspeed.accelerator import get_accelerator from deepspeed.ops.op_builder import InferenceBuilder +from 
deepspeed.ops.transformer import DeepSpeedInferenceConfig +from deepspeed.ops.transformer.inference.op_binding import ResidualAddOp from .inference_test_utils import get_dtypes if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: @@ -36,11 +38,6 @@ def allclose(x, y): return torch.allclose(x, y, rtol=rtol, atol=atol) -@pytest.fixture(scope="module") -def inference_module(): - return InferenceBuilder().load() - - def res_add_bias_ref(hidden_state, residual, attn_output, attn_bias, final_bias, mp_size=1, pre_attn_norm=True): if pre_attn_norm: hidden_state += (residual + final_bias + attn_output + attn_bias) / mp_size @@ -75,10 +72,10 @@ def run_residual_add_reference(hidden_state, residual, attn_output, attn_bias, f @pytest.mark.parametrize("mp_size", [1, 2]) @pytest.mark.parametrize("pre_attn_norm", [True, False]) @pytest.mark.parametrize("use_triton_ops", [True, False]) -def test_residual_add(inference_module, batch, sequence, hidden_dim, dtype, mlp_after_attn, add_bias, mp_size, - pre_attn_norm, use_triton_ops): - if not deepspeed.HAS_TRITON and use_triton_ops: - pytest.skip("triton has to be installed for the test") +def test_residual_add(batch, sequence, hidden_dim, dtype, mlp_after_attn, add_bias, mp_size, pre_attn_norm, + use_triton_ops): + if not deepspeed.get_accelerator().is_triton_supported(): + pytest.skip("triton is not supported on this system") ds_out = torch.randn((batch, sequence, hidden_dim), dtype=dtype, device=get_accelerator().device_name()) residual = torch.randn((batch, sequence, hidden_dim), dtype=dtype, device=get_accelerator().device_name()) attn_output = torch.randn((batch, sequence, hidden_dim), dtype=dtype, device=get_accelerator().device_name()) @@ -96,19 +93,9 @@ def test_residual_add(inference_module, batch, sequence, hidden_dim, dtype, mlp_ if use_triton_ops: from deepspeed.ops.transformer.inference.triton import residual_add_bias ds_out = residual_add_bias(*res_add_args) - if dtype == torch.float16: - ds_out = inference_module.residual_add_bias_fp16(*res_add_args) - elif dtype == torch.float32: - ds_out = inference_module.residual_add_bias_fp32(*res_add_args) - elif dtype == torch.bfloat16: - ds_out = inference_module.residual_add_bias_bf16(*res_add_args) else: - if dtype == torch.float16: - ds_out = inference_module.residual_add_bias_fp16(*res_add_args) - elif dtype == torch.float32: - ds_out = inference_module.residual_add_bias_fp32(*res_add_args) - else: - raise ValueError(f"Unsupported dtype: {dtype}") + config = DeepSpeedInferenceConfig(dtype=dtype) + ds_out = ResidualAddOp(config).residual_add_func(*res_add_args) if not allclose(ds_out, ref_out): print((ds_out - ref_out).abs().max()) diff --git a/tests/unit/ops/transformer/inference/test_rms_norm.py b/tests/unit/ops/transformer/inference/test_rms_norm.py index 508a40e12e8d..ed500ec16f9f 100644 --- a/tests/unit/ops/transformer/inference/test_rms_norm.py +++ b/tests/unit/ops/transformer/inference/test_rms_norm.py @@ -8,13 +8,13 @@ import pytest from deepspeed.accelerator import get_accelerator from deepspeed.ops.op_builder import InferenceBuilder # type: ignore +from deepspeed.ops.transformer.inference.op_binding.pre_rms_norm import PreRMSNormOp +from deepspeed.ops.transformer.inference.op_binding.rms_norm import RMSNormOp from .inference_test_utils import allclose, get_dtypes if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None - def ref_implementation(vals, gamma, 
epsilon): variance = vals.to(torch.float32).pow(2).mean(-1, keepdim=True) @@ -27,10 +27,7 @@ def ref_implementation(vals, gamma, epsilon): def ds_implementation(vals, gamma, epsilon): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - return inference_module.rms_norm(vals, gamma, epsilon) + return RMSNormOp()(vals, gamma, epsilon) @pytest.mark.inference_ops @@ -51,10 +48,7 @@ def test_rms_norm(batch, seq_len, channels, dtype): def pre_ds_implementation(vals, residual, gamma, epsilon): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - return inference_module.pre_rms_norm(vals, residual, gamma, epsilon) + return PreRMSNormOp()(vals, residual, gamma, epsilon) def pre_ref_implementation(vals, residual, gamma, epsilon): @@ -74,7 +68,7 @@ def pre_ref_implementation(vals, residual, gamma, epsilon): @pytest.mark.parametrize("batch", [1, 32]) @pytest.mark.parametrize("seq_len", [1, 128]) @pytest.mark.parametrize("channels", [384, 512, 768, 1024, 2048, 8192, 14432]) -@pytest.mark.parametrize("dtype", [torch.float16, torch.float32]) +@pytest.mark.parametrize("dtype", get_dtypes()) def test_pre_norm(batch, seq_len, channels, dtype): device = get_accelerator().current_device_name() vals = torch.randn((batch, seq_len, channels), dtype=dtype, device=device) diff --git a/tests/unit/ops/transformer/inference/test_softmax.py b/tests/unit/ops/transformer/inference/test_softmax.py index 7d0d6e14b651..98db65088d65 100644 --- a/tests/unit/ops/transformer/inference/test_softmax.py +++ b/tests/unit/ops/transformer/inference/test_softmax.py @@ -11,7 +11,6 @@ if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None torch_minor_version = None @@ -41,8 +40,8 @@ def run_softmax_ds(input, use_triton_ops=False): @pytest.mark.parametrize("dtype", [torch.float16, torch.float32]) @pytest.mark.parametrize("use_triton_ops", [True]) def test_softmax(batch, sequence, channels, dtype, use_triton_ops): - if not deepspeed.HAS_TRITON and use_triton_ops: - pytest.skip("triton has to be installed for the test") + if not deepspeed.get_accelerator().is_triton_supported(): + pytest.skip("triton is not supported on this system") device = deepspeed.accelerator.get_accelerator().device_name() input_ds = torch.randn((batch, sequence, channels), dtype=dtype, device=device) input_ref = input_ds.clone().detach() diff --git a/tests/unit/runtime/half_precision/test_bf16.py b/tests/unit/runtime/half_precision/test_bf16.py index 0af14abc3be5..99339e35ba4f 100644 --- a/tests/unit/runtime/half_precision/test_bf16.py +++ b/tests/unit/runtime/half_precision/test_bf16.py @@ -10,7 +10,7 @@ from unit.common import DistributedTest from deepspeed.ops.op_builder import CPUAdamBuilder from unit.simple_model import SimpleModel, SimpleOptimizer, random_dataloader -from unit.util import bf16_required_version_check +from unit.util import bf16_required_version_check, hpu_lazy_enabled from deepspeed import comm as dist from deepspeed.accelerator import get_accelerator @@ -197,6 +197,9 @@ def test(self, optimizer_constructor, zero_stage=2): hidden_dim = 10 model = SimpleModel(hidden_dim) + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + model.to(device) client_optimizer = optimizer_constructor(params=model.parameters()) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=client_optimizer) 
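The inference op tests above all follow the same refactor: the lazily loaded inference_module and its dtype-suffixed entry points are replaced by op-binding classes. A condensed sketch of the new call pattern, using only names that appear in the hunks above:

from deepspeed.ops.transformer import DeepSpeedInferenceConfig
from deepspeed.ops.transformer.inference.op_binding.bias_gelu import BiasGeluOp
from deepspeed.ops.transformer.inference.op_binding.rms_norm import RMSNormOp

def bias_gelu(activations, bias):
    # The dtype dispatch (bias_gelu_fp16 / bias_gelu_bf16 / bias_gelu_fp32)
    # now happens inside the binding, driven by the config's dtype.
    config = DeepSpeedInferenceConfig(dtype=activations.dtype)
    return BiasGeluOp(config)(activations, bias)

def rms_norm(vals, gamma, epsilon):
    # Bindings without dtype-specialized kernels are constructed with no
    # config in these tests.
    return RMSNormOp()(vals, gamma, epsilon)
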
@@ -275,6 +278,9 @@ def test(self, stage=2): hidden_dim = 10 model = SimpleModel(hidden_dim) + if hpu_lazy_enabled(): + device = get_accelerator().current_device_name() + model.to(device) optimizer = torch.optim.Adam(model.parameters()) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=optimizer) data_loader = random_dataloader(model=model, @@ -300,9 +306,10 @@ def test(self, comp_type, comm_type): " DeepSpeed BFloat16 tests need torch >= 1.10, NCCL >= 2.10.3, CUDA > =11.0 and HW support for BFloat16 to run correctly" ) - if comp_type == torch.float16 or comm_type == torch.float16: - if not get_accelerator().is_fp16_supported(): - pytest.skip("fp16 is not supported") + if comm_type and (comp_type not in get_accelerator().supported_dtypes() + or comm_type not in get_accelerator().supported_dtypes()): + pytest.skip( + f"comp_type:{comp_type}, comm_type:{comm_type} not supported by {get_accelerator().device_name()}.") type_str = {torch.float16: "fp16", torch.bfloat16: "bf16"} @@ -326,6 +333,12 @@ def test(self, comp_type, comm_type): hidden_dim = 10 model = SimpleModel(hidden_dim) + if hpu_lazy_enabled(): + # TODO: remove this when the following is resolved: + # https://jira.habana-labs.com/browse/SW-137450 + config_dict["fp16"]["initial_scale_power"] = 30 + device = get_accelerator().current_device_name() + model.to(device) optimizer = torch.optim.Adam(model.parameters()) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=optimizer) data_loader = random_dataloader(model=model, diff --git a/tests/unit/runtime/half_precision/test_fp16.py b/tests/unit/runtime/half_precision/test_fp16.py index dba15a969459..7690a8e3a2c7 100644 --- a/tests/unit/runtime/half_precision/test_fp16.py +++ b/tests/unit/runtime/half_precision/test_fp16.py @@ -12,7 +12,8 @@ from unit.simple_model import SimpleModel, SimpleOptimizer, random_dataloader, SimpleMoEModel, sequence_dataloader from deepspeed.utils.torch import required_torch_version from deepspeed.accelerator import get_accelerator -from deepspeed.ops.op_builder import CPUAdamBuilder, FusedLambBuilder +from deepspeed.ops.op_builder import CPUAdamBuilder, FusedLambBuilder, FusedAdamBuilder +from unit.util import hpu_lazy_enabled from deepspeed.moe.utils import split_params_into_different_moe_groups_for_optimizer try: @@ -26,6 +27,7 @@ pytest.skip(f"fp16 not supported, valid dtype: {get_accelerator().supported_dtypes()}", allow_module_level=True) +@pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedLambBuilder.NAME], reason="lamb is not compatible") class TestLambFP32GradClip(DistributedTest): world_size = 2 @@ -60,6 +62,7 @@ def test(self): model.step() +@pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedLambBuilder.NAME], reason="lamb is not compatible") class TestLambFP16(DistributedTest): world_size = 2 @@ -207,6 +210,8 @@ def mock_unscale_and_clip_grads(total_norm, apply_scale=True): engine.backward(loss) engine.step() + @pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedAdamBuilder.NAME], + reason="fused adam is not compatible") def test_fused_gradnorm(self, monkeypatch): if not get_accelerator().is_fp16_supported(): pytest.skip("fp16 is not supported") @@ -240,6 +245,7 @@ def mock_unscale_and_clip_grads(grads_groups_flat, total_norm, apply_scale=True) engine.backward(loss) engine.step() + @pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedLambBuilder.NAME], reason="lamb is not compatible") @pytest.mark.parametrize("fused_lamb_legacy", [(False), (True)]) 
@pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedLambBuilder.NAME], reason="FusedLambBuilder has not been implemented on this system.") @@ -613,6 +619,9 @@ def test(self, zero_stage, optimizer_constructor): hidden_dim = 10 model = SimpleModel(hidden_dim) + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + model.to(device) client_optimizer = optimizer_constructor(params=model.parameters()) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=client_optimizer) @@ -748,6 +757,9 @@ def test(self, stage): hidden_dim = 10 model = SimpleModel(hidden_dim) + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + model.to(device) optimizer = torch.optim.Adam(model.parameters()) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=optimizer) data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) diff --git a/tests/unit/runtime/test_autocast.py b/tests/unit/runtime/test_autocast.py index 9176770afda7..9692f105d69d 100644 --- a/tests/unit/runtime/test_autocast.py +++ b/tests/unit/runtime/test_autocast.py @@ -25,6 +25,7 @@ def test_missing_amp_autocast(self, half_op): output = ds_linear(input) assert output.dtype == ds_linear.weight.dtype + @pytest.mark.skipif(get_accelerator().amp() is None, reason='amp is not installed') def test_disable_autocast_linear(self, half_op): amp = get_accelerator().amp() diff --git a/tests/unit/runtime/test_ds_config_dict.py b/tests/unit/runtime/test_ds_config_dict.py index c11c63d04867..764563f4da31 100644 --- a/tests/unit/runtime/test_ds_config_dict.py +++ b/tests/unit/runtime/test_ds_config_dict.py @@ -9,6 +9,7 @@ import json import hjson import argparse +import torch from deepspeed.runtime.zero.config import DeepSpeedZeroConfig from deepspeed.accelerator import get_accelerator @@ -256,7 +257,11 @@ def test(self, base_config): model = SimpleModel(hidden_dim=hidden_dim) model, _, _, _ = deepspeed.initialize(config=base_config, model=model) - data_loader = random_dataloader(model=model, total_samples=5, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=5, + hidden_dim=hidden_dim, + device=model.device, + dtype=torch.half) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) with pytest.raises(AssertionError): diff --git a/tests/unit/runtime/test_ds_initialize.py b/tests/unit/runtime/test_ds_initialize.py index 9ff99f169f7a..d5a070dc691d 100644 --- a/tests/unit/runtime/test_ds_initialize.py +++ b/tests/unit/runtime/test_ds_initialize.py @@ -11,7 +11,7 @@ from unit.simple_model import SimpleModel, random_dataloader from unit.common import DistributedTest -from unit.util import bf16_required_version_check, required_amp_check +from unit.util import bf16_required_version_check, required_amp_check, hpu_lazy_enabled import deepspeed from deepspeed.ops.adam import FusedAdam @@ -230,6 +230,9 @@ def test(self, optimizer_extension, model_dtype, grad_accum_dtype): hidden_dim = 10 model = SimpleModel(hidden_dim) + # TODO: SW-145674 remove this WA when SW-145671 is resolved. 
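The workaround this TODO refers to is the explicit device placement under HPU lazy mode, shown in the lines that follow and repeated across the fp16/bf16/zero tests above (some hunks use current_device_name() instead of device_name()). A standalone sketch of the pattern, with a hypothetical helper name:

from deepspeed.accelerator import get_accelerator
from unit.util import hpu_lazy_enabled

def place_model_for_hpu_lazy(model):
    # Under HPU lazy mode the model is moved to the accelerator explicitly
    # before deepspeed.initialize(); on other accelerators this is a no-op.
    if hpu_lazy_enabled():
        model.to(get_accelerator().device_name())
    return model
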
+ if hpu_lazy_enabled(): + model.to(get_accelerator().device_name()) model_parameters = list(model.parameters()) if key in is_supported: diff --git a/tests/unit/runtime/zero/test_zero.py b/tests/unit/runtime/zero/test_zero.py index 7262a1b2c998..34f243f25978 100644 --- a/tests/unit/runtime/zero/test_zero.py +++ b/tests/unit/runtime/zero/test_zero.py @@ -25,6 +25,7 @@ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint from deepspeed.runtime.zero.utils import ZeRORuntimeException from deepspeed.accelerator import get_accelerator +from unit.util import hpu_lazy_enabled def run_unbalanced_gradients(model, data_loader): @@ -289,6 +290,8 @@ def forward(self, x, y): world_size = dist.get_world_size() n_layers = world_size * 2 model = MyModel(hidden_dim=hidden_dim, n_layers=n_layers, freeze_params=freeze_params) + if hpu_lazy_enabled(): + model.to(get_accelerator().device_name()) optim_groups = [ { @@ -987,8 +990,8 @@ class ModelWhereParentInitializesChildWeights(Module): def __init__(self) -> None: super().__init__() - - self.linear = Linear(12, 1) + dev = get_accelerator().device_name() + self.linear = Linear(12, 1, device=dev) self.apply(self.__init_weights) @@ -1522,6 +1525,9 @@ def test(self, force_ds_optim): hidden_dim = 10 model = SimpleModel(hidden_dim) + if hpu_lazy_enabled(): + device = get_accelerator().current_device_name() + model.to(device) optimizer = torch.optim.Adam(model.parameters()) diff --git a/tests/unit/skip_marker.py b/tests/unit/skip_marker.py new file mode 100644 index 000000000000..685c84805d40 --- /dev/null +++ b/tests/unit/skip_marker.py @@ -0,0 +1,440 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +hpu_lazy_skip_tests = {} + +g1_lazy_skip_tests = { + "unit/inference/test_human_eval.py::test_human_eval[codellama/CodeLlama-7b-Python-hf]": + "HPU is not supported on deepspeed-mii", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl-False]": + "Skip workload takes longer time to run", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-EleutherAI/gpt-neo-2.7B-False]": + "Skip workload takes longer time to run", + "unit/linear/test_ctx.py::TestEngine::test_model": "Skip on G1 due to SW-209651", + "unit/linear/test_linear.py::TestOptimizedLinear::test[qbit6-bws2]": "Skip on G1 due to SW-209651", + "unit/linear/test_linear.py::TestOptimizedLinear::test[qbit8-bws2]": "Skip on G1 due to SW-209651", + "unit/linear/test_linear.py::TestLoRALinear::test[2]": "Skip on G1 due to SW-209651", + "unit/linear/test_ctx.py::TestInitTransformers::test_pretrained_init": "Skip on G1 due to SW-209651", + "unit/linear/test_linear.py::TestBasicLinear::test": "Skip on G1 due to SW-209651", + "unit/linear/test_linear.py::TestOptimizedLinear::test[qbit8-bws1]": "Skip on G1 due to SW-209651", + "unit/linear/test_ctx.py::TestInitTransformers::test_config_init": "Skip on G1 due to SW-209651", + "unit/linear/test_linear.py::TestLoRALinear::test[1]": "Skip on G1 due to SW-209651", + "unit/linear/test_linear.py::TestQuantLinear::test[8]": "Skip on G1 due to SW-209651", + "unit/linear/test_quant_param.py::TestQuantParam::test_move_to_accelerator": "Skip on G1 due to SW-209651", + "unit/linear/test_linear.py::TestQuantLinear::test[6]": "Skip on G1 due to SW-209651", + "unit/linear/test_quant_param.py::TestQuantParam::test_unsupported_dtypes[dtype0]": "Skip on G1 due to SW-209651", 
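The skip tables (they continue below for the other device generations and execution modes) map full pytest node ids to skip reasons. How they are consumed is outside this diff; a hypothetical conftest.py hook that applies such a table could look like the following, where the import path and the choice of table are assumptions:

import pytest
from unit.skip_marker import g1_lazy_skip_tests  # import path assumed

def pytest_collection_modifyitems(config, items):
    # Attach a skip marker to every collected test whose node id appears
    # in the active skip table.
    for item in items:
        reason = g1_lazy_skip_tests.get(item.nodeid)
        if reason is not None:
            item.add_marker(pytest.mark.skip(reason=reason))
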
+ "unit/linear/test_linear.py::TestOptimizedLinear::test[qbit6-bws1]": "Skip on G1 due to SW-209651", + "unit/linear/test_quant_param.py::TestQuantParam::test_requires_grad": "Skip on G1 due to SW-209651", + "unit/linear/test_quant_param.py::TestQuantParam::test_unsupported_dtypes[dtype1]": "Skip on G1 due to SW-209651", + "unit/linear/test_quant_param.py::TestQuantParam::test_hf_clone": "Skip on G1 due to SW-209651", + "unit/ops/fp_quantizer/test_fp8_gemm.py::test_fp_quant[2048-qbits8-bf16]": "Skip on G1 due to SW-209651", + "unit/ops/fp_quantizer/test_fp8_gemm.py::test_fp_quant[64-qbits8-bf16]": "Skip on G1 due to SW-209651", + "unit/ops/fp_quantizer/test_fp8_gemm.py::test_fp_quant[2-qbits8-bf16]": "Skip on G1 due to SW-209651", + "unit/ops/fp_quantizer/test_fp8_gemm.py::test_fp_quant[256-qbits8-bf16]": "Skip on G1 due to SW-209651", + "unit/ops/fp_quantizer/test_fp8_gemm.py::test_fp_quant[1-qbits8-bf16]": "Skip on G1 due to SW-209651", + "unit/ops/fp_quantizer/test_fp8_gemm.py::test_fp_quant[128-qbits8-bf16]": "Skip on G1 due to SW-209651", + "unit/ops/fp_quantizer/test_fp8_gemm.py::test_fp_quant[1024-qbits8-bf16]": "Skip on G1 due to SW-209651", + "unit/ops/fp_quantizer/test_fp8_gemm.py::test_fp_quant[8-qbits8-bf16]": "Skip on G1 due to SW-209651", + "unit/ops/fp_quantizer/test_fp8_gemm.py::test_fp_quant[32-qbits8-bf16]": "Skip on G1 due to SW-209651", + "unit/ops/fp_quantizer/test_fp8_gemm.py::test_fp_quant[4-qbits8-bf16]": "Skip on G1 due to SW-209651", + "unit/ops/fp_quantizer/test_fp8_gemm.py::test_fp_quant[512-qbits8-bf16]": "Skip on G1 due to SW-209651", + "unit/ops/fp_quantizer/test_fp_quant.py::test_fp_quant_selective[bf16]": "Skip on G1 due to SW-209651", + "unit/ops/fp_quantizer/test_fp_quant.py::test_fp_quant[qbits12-bf16]": "Skip on G1 due to SW-209651", + "unit/ops/fp_quantizer/test_fp_quant.py::test_fp_quant_meta[bf16]": "Skip on G1 due to SW-209651", + "unit/ops/fp_quantizer/test_fp_quant.py::test_fp_quant[qbits6-bf16]": "Skip on G1 due to SW-209651", + "unit/ops/fp_quantizer/test_fp_quant.py::test_fp_quant[qbits8-bf16]": "Skip on G1 due to SW-209651", +} + +g2_lazy_skip_tests = { + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-9-1024]": "Stuck, SW-190067.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[4-9-1024]": "stuck, SW-190067.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[4-9-1024]": + "Stuck, SW-190067.", + "unit/inference/test_human_eval.py::test_human_eval[codellama/CodeLlama-7b-Python-hf]": + "HPU is not supported on deepspeed-mii", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl-False]": + "Skip workload takes longer time to run", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-EleutherAI/gpt-neo-2.7B-False]": + "Skip workload takes longer time to run", + "unit/ops/fp_quantizer/test_fp_quant.py::test_fp_quant_selective[bf16]": "Skip on G2 due to SW-209651", + "unit/ops/fp_quantizer/test_fp_quant.py::test_fp_quant[qbits12-bf16]": "Skip on G1 due to SW-209651", + "unit/ops/fp_quantizer/test_fp_quant.py::test_fp_quant[qbits6-bf16]": "Skip on G1 due to SW-209651", + "unit/linear/test_linear.py::TestOptimizedLinear::test[qbit6-bws2]": "Skip on G1 due to SW-209651", + "unit/linear/test_linear.py::TestOptimizedLinear::test[qbit6-bws1]": "Skip on G1 due to SW-209651", +} + +g3_lazy_skip_tests = { + 
"unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl-False]": + "Skip workload takes longer time to run", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-EleutherAI/gpt-neo-2.7B-False]": + "Skip workload takes longer time to run", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-9-1024]": "test hang patch:430071", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[4-9-1024]": + "test hang patch:430071", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[4-9-1024]": "test hang patch:430071", +} +hpu_eager_skip_tests = {} + +g1_eager_skip_tests = { + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-neo-True]": + "Flaky Segfault. Stuck", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-neo-False]": + "Flaky Segfault. Stuck", + "unit/inference/test_human_eval.py::test_human_eval[codellama/CodeLlama-7b-Python-hf]": + "HPU is not supported on deepspeed-mii", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl-False]": + "Skip workload takes longer time to run", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-EleutherAI/gpt-neo-2.7B-False]": + "Skip workload takes longer time to run", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl-True]": + "Skip workload takes longer time to run", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-EleutherAI/gpt-neo-2.7B-True]": + "Skip workload takes longer time to run", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[bf16-marian-False-False]": + "Struck observed", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[bf16-marian-False-False]": + "Flaky struck observed", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp32-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp32-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestLowCpuMemUsage::test[gpt2-True]": + "Skip struck for longer duration", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[fp16-marian-True-True]": + "Skip struck and fp16 not supported", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp16-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config1]": + "Test Hang", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config2]": + "Test Hang", + 
"unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-Triton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-noCG-noTriton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-noCG-Triton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-CG-noTriton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-Triton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-CG-Triton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-Triton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-noTriton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-Triton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-noTriton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-noCG-Triton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-noCG-noTriton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-CG-Triton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-CG-noTriton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-False-True]": + "Test Hang", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-True-True]": + "Test Hang", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-Triton-True-True]": + "Test Hang", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[bf16-marian-True-True]": + "test Hang", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[facebook/opt-350m-True]": + "test Hang", + 
"unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-False-1-dtype0]": + "test Hang", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[EleutherAI/gpt-j-6B-fp16-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[bf16-marian-True-True]": + "Skip due to flaky hang", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[EleutherAI/gpt-j-6B-True]": + "test Hang", + "unit/linear/test_ctx.py::TestEngine::test_model": + "Skip on G1 due to SW-209651", + "unit/linear/test_linear.py::TestOptimizedLinear::test[qbit6-bws2]": + "Skip on G1 due to SW-209651", + "unit/linear/test_linear.py::TestOptimizedLinear::test[qbit8-bws2]": + "Skip on G1 due to SW-209651", + "unit/linear/test_linear.py::TestLoRALinear::test[2]": + "Skip on G1 due to SW-209651", + "unit/linear/test_ctx.py::TestInitTransformers::test_pretrained_init": + "Skip on G1 due to SW-209651", + "unit/linear/test_linear.py::TestBasicLinear::test": + "Skip on G1 due to SW-209651", + "unit/linear/test_linear.py::TestOptimizedLinear::test[qbit8-bws1]": + "Skip on G1 due to SW-209651", + "unit/linear/test_ctx.py::TestInitTransformers::test_config_init": + "Skip on G1 due to SW-209651", + "unit/linear/test_linear.py::TestLoRALinear::test[1]": + "Skip on G1 due to SW-209651", + "unit/linear/test_linear.py::TestQuantLinear::test[8]": + "Skip on G1 due to SW-209651", + "unit/linear/test_quant_param.py::TestQuantParam::test_move_to_accelerator": + "Skip on G1 due to SW-209651", + "unit/linear/test_linear.py::TestQuantLinear::test[6]": + "Skip on G1 due to SW-209651", + "unit/linear/test_quant_param.py::TestQuantParam::test_unsupported_dtypes[dtype0]": + "Skip on G1 due to SW-209651", + "unit/linear/test_linear.py::TestOptimizedLinear::test[qbit6-bws1]": + "Skip on G1 due to SW-209651", + "unit/linear/test_quant_param.py::TestQuantParam::test_requires_grad": + "Skip on G1 due to SW-209651", + "unit/linear/test_quant_param.py::TestQuantParam::test_unsupported_dtypes[dtype1]": + "Skip on G1 due to SW-209651", + "unit/linear/test_quant_param.py::TestQuantParam::test_hf_clone": + "Skip on G1 due to SW-209651", + "unit/ops/fp_quantizer/test_fp8_gemm.py::test_fp_quant[2048-qbits8-bf16]": + "Skip on G1 due to SW-209651", + "unit/ops/fp_quantizer/test_fp8_gemm.py::test_fp_quant[64-qbits8-bf16]": + "Skip on G1 due to SW-209651", + "unit/ops/fp_quantizer/test_fp8_gemm.py::test_fp_quant[2-qbits8-bf16]": + "Skip on G1 due to SW-209651", + "unit/ops/fp_quantizer/test_fp8_gemm.py::test_fp_quant[256-qbits8-bf16]": + "Skip on G1 due to SW-209651", + "unit/ops/fp_quantizer/test_fp8_gemm.py::test_fp_quant[1-qbits8-bf16]": + "Skip on G1 due to SW-209651", + "unit/ops/fp_quantizer/test_fp8_gemm.py::test_fp_quant[128-qbits8-bf16]": + "Skip on G1 due to SW-209651", + "unit/ops/fp_quantizer/test_fp8_gemm.py::test_fp_quant[1024-qbits8-bf16]": + "Skip on G1 due to SW-209651", + "unit/ops/fp_quantizer/test_fp8_gemm.py::test_fp_quant[8-qbits8-bf16]": + "Skip on G1 due to SW-209651", + "unit/ops/fp_quantizer/test_fp8_gemm.py::test_fp_quant[32-qbits8-bf16]": + "Skip on G1 due to SW-209651", + "unit/ops/fp_quantizer/test_fp8_gemm.py::test_fp_quant[4-qbits8-bf16]": + "Skip on G1 due to SW-209651", + "unit/ops/fp_quantizer/test_fp8_gemm.py::test_fp_quant[512-qbits8-bf16]": + "Skip on G1 due to SW-209651", + "unit/ops/fp_quantizer/test_fp_quant.py::test_fp_quant_selective[bf16]": + "Skip on G1 due to SW-209651", + 
"unit/ops/fp_quantizer/test_fp_quant.py::test_fp_quant[qbits12-bf16]": + "Skip on G1 due to SW-209651", + "unit/ops/fp_quantizer/test_fp_quant.py::test_fp_quant_meta[bf16]": + "Skip on G1 due to SW-209651", + "unit/ops/fp_quantizer/test_fp_quant.py::test_fp_quant[qbits6-bf16]": + "Skip on G1 due to SW-209651", + "unit/ops/fp_quantizer/test_fp_quant.py::test_fp_quant[qbits8-bf16]": + "Skip on G1 due to SW-209651", +} + +g2_eager_skip_tests = { + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-9-1024]": + "Stuck, SW-190067.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[4-9-1024]": + "stuck, SW-190067.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[4-9-1024]": + "Stuck, SW-190067.", + "unit/inference/test_human_eval.py::test_human_eval[codellama/CodeLlama-7b-Python-hf]": + "HPU is not supported on deepspeed-mii", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl-False]": + "Skip workload takes longer time to run", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-EleutherAI/gpt-neo-2.7B-False]": + "Skip workload takes longer time to run", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl-True]": + "Skip workload takes longer time to run", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-EleutherAI/gpt-neo-2.7B-True]": + "Skip workload takes longer time to run", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp16-noCG-noTriton-True-True]": + "Skip struck for longer duration", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp32-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp32-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp16-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-False-True]": + "Test Hang", + "unit/inference/test_inference.py::TestLowCpuMemUsage::test[gpt2-True]": + "Skip struck for longer duration", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-Triton-True-False]": + 
"Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-noCG-noTriton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-noCG-Triton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-CG-noTriton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-Triton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-CG-Triton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-Triton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-noTriton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-Triton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-noTriton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-noCG-Triton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-noCG-noTriton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-CG-Triton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-CG-noTriton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-neo-True]": + "Flaky Segfault. 
Stuck", + "unit/inference/test_inference.py::TestMPSize::test[fp16-gpt-neo-True]": + "GC failed so skip to check", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-True-True]": + "Test Hang", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-Triton-True-True]": + "Test Hang", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-False-True-1-dtype1]": + "test Hang", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[bigscience/bloom-560m-fp16-True]": + "test Hang", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[EleutherAI/gpt-j-6B-fp16-True]": + "Skip due to SW-193097", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[EleutherAI/gpt-j-6B-True]": + "test Hang", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[bf16-marian-True-True]": + "Skip due to flaky hang", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[bf16-marian-True-True]": + "Skip due to flaky hang", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[bf16-marian-False-True]": + "Skip due to flaky hang", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[bf16-marian-False-True]": + "Skip due to flaky hang", + "unit/ops/fp_quantizer/test_fp_quant.py::test_fp_quant_selective[bf16]": + "Skip on G2 due to SW-209651", + "unit/ops/fp_quantizer/test_fp_quant.py::test_fp_quant[qbits12-bf16]": + "Skip on G1 due to SW-209651", + "unit/ops/fp_quantizer/test_fp_quant.py::test_fp_quant[qbits6-bf16]": + "Skip on G1 due to SW-209651", + "unit/linear/test_linear.py::TestOptimizedLinear::test[qbit6-bws2]": + "Skip on G1 due to SW-209651", + "unit/linear/test_linear.py::TestOptimizedLinear::test[qbit6-bws1]": + "Skip on G1 due to SW-209651", +} +g3_eager_skip_tests = { + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl-False]": + "Skip workload takes longer time to run", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-EleutherAI/gpt-neo-2.7B-False]": + "Skip workload takes longer time to run", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl-True]": + "Skip workload takes longer time to run", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-EleutherAI/gpt-neo-2.7B-True]": + "Skip workload takes longer time to run", + "unit/inference/test_inference.py::TestLowCpuMemUsage::test[gpt2-True]": + "Skip struck for longer duration", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp16-noCG-noTriton-True-True]": + "Skip struck for longer duration", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-9-1024]": + "test hang patch:430071", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[4-9-1024]": + "test hang patch:430071", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[4-9-1024]": + "test hang patch:430071", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[EleutherAI/gpt-j-6B-fp16-True]": + "Skip due to SW-193097", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[bigscience/bloom-560m-fp16-True]": + "Skip due to SW-193097", + 
"unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp32-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp32-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp16-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-True-True]": + "GC failed so skip to check", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-False-True]": + "GC failed so skip to check", + "unit/inference/test_inference.py::TestMPSize::test[fp16-gpt-neo-True]": + "GC failed so skip to check", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-Triton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-noCG-noTriton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-noCG-Triton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-CG-noTriton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-Triton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-CG-Triton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-Triton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-noTriton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-Triton-True-False]": + "Skip bloom due to process struck and also fail", + 
"unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-noTriton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-noCG-Triton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-noCG-noTriton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-CG-Triton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-CG-noTriton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-neo-True]": + "Flaky Segfault. Stuck", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-Triton-True-True]": + "Test Hang", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[facebook/opt-350m-True]": + "test Hang", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[EleutherAI/gpt-j-6B-True]": + "test Hang", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[bigscience/bloom-560m-True]": + "test Hang", +} + +gpu_skip_tests = { + "unit/runtime/zero/test_zero.py::TestZeroOffloadOptim::test[True]": + "Disabled as it is causing test to stuck. SW-163517.", + "unit/inference/test_stable_diffusion.py::TestStableDiffusion::test": + "Xfail not supported", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl-False]": + "skip: timeout triggered", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-EleutherAI/gpt-neo-2.7B-False]": + "skip: timeout triggered", +} diff --git a/tests/unit/util.py b/tests/unit/util.py index feec326ede6c..8ae866fdd9aa 100644 --- a/tests/unit/util.py +++ b/tests/unit/util.py @@ -5,6 +5,8 @@ import pytest import torch +import os +import multiprocessing from deepspeed.accelerator import get_accelerator, is_current_accelerator_supported from deepspeed.git_version_info import torch_info @@ -67,3 +69,57 @@ def required_amp_check(): return False else: return True + + +def worker(proc_id, return_dict): + #TODO SW-114787: move to new api outside experimental + import habana_frameworks.torch.utils.experimental as htexp + deviceType = htexp._get_device_type() + if deviceType == htexp.synDeviceType.synDeviceGaudi: + return_dict['devicetype'] = "Gaudi" + elif deviceType == htexp.synDeviceType.synDeviceGaudi2: + return_dict['devicetype'] = "Gaudi2" + elif deviceType == htexp.synDeviceType.synDeviceGaudi3: + return_dict['devicetype'] = "Gaudi3" + else: + return_dict['devicetype'] = None + assert False, f'Unexpected hpu device Type: {deviceType}' + + +def get_hpu_dev_version(): + hpu_dev = None + if get_accelerator().device_name() != 'hpu': + return hpu_dev + if os.getenv("DEEPSPEED_UT_HL_DEVICE", default=None): + hpu_dev = os.getenv("DEEPSPEED_UT_HL_DEVICE") + if hpu_dev not in ["Gaudi", "Gaudi2", "Gaudi3"]: + manager = multiprocessing.Manager() + return_dict = manager.dict() + proc_id = 0 + multiprocessing.set_start_method("spawn", force=True) + p = multiprocessing.Process(target=worker, args=(proc_id, return_dict)) + 
p.start() + p.join() + try: + dev_type = return_dict['devicetype'] + except: + assert False, 'Unexpected hpu device Type: {}'.format(return_dict['devicetype']) + p.terminate() + exit_code = p.exitcode + if exit_code: + assert False, 'HPU dev type process exit with: {}'.format(exit_code) + if dev_type in ["Gaudi", "Gaudi2", "Gaudi3"]: + hpu_dev = dev_type + os.environ['DEEPSPEED_UT_HL_DEVICE'] = dev_type + return dev_type + else: + assert False, 'Unexpected hpu device Type: {}'.format(return_dict['devicetype']) + else: + return hpu_dev + + +def hpu_lazy_enabled(): + if get_accelerator().device_name() == 'hpu': + import habana_frameworks.torch.hpu as thpu + return thpu.is_lazy() + return False diff --git a/tests/unit/xfail_marker.py b/tests/unit/xfail_marker.py new file mode 100644 index 000000000000..a640dc383dc0 --- /dev/null +++ b/tests/unit/xfail_marker.py @@ -0,0 +1,5948 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +hpu_lazy_xfail_tests = {} + +g1_lazy_xfail_tests = { + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + 
"unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + 
"unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163095.", + 
"unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163095.", + "unit/inference/test_inference.py::TestModelTask::test[distilgpt2-text-generation-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[distilgpt2-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-bf16-noCG-noTriton-True-False]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[gpt2-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-fp32-CG-noTriton-True-False]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-bf16-CG-noTriton-True-False]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163098.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1024-fp16]": + "Xfail, due to FP16 not supported.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[64-fp16]": + "Xfail, due to FP16 not supported.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1048576-fp16]": + "Xfail, due to FP16 not supported.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[128-fp16]": + "Xfail, due to FP16 not supported.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[22-fp16]": + "Xfail, due to FP16 not supported.", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp16-fp32-zero3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp16-bf16-zero3]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_model_quantization": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_post_init_quant": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_quantized_initialization_nvme_offload": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_post_init_quant_cpu_offload": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_half_int4_quantization": + "float16/half is not supported on Gaudi.", + 
"unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_quantized_initialization_cpu_offload": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_quantized_linear": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_half_int8_quantization": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_quantized_initialization": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_post_init_quant_nvme_offload": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_pipeline_checkpoint_loading[3-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-3-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuAdamW-AdamW]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuSGD-SGD]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuAdam-Adam]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuAdamW-AdamW]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuAdam-Adam]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuSGD-SGD]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[2]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-1-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-2-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-2-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-1-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[2-20-2000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[1-8-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[1-20-2000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[2-8-2000]": + "float16/half is not supported on Gaudi.", + 
"unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[4-20-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[4-8-2000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[4-20-2000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[1-8-2000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[2-8-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[1-20-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[2-20-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[4-8-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TestSecondaryTensorSize::test[2-8-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TestSecondaryTensorSize::test[2-20-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TestSecondaryTensorSize::test[4-20-4000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TestSecondaryTensorSize::test[4-8-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TestSecondaryTensorSize::test[2-8-4000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TestSecondaryTensorSize::test[4-20-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TestSecondaryTensorSize::test[2-20-4000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TestSecondaryTensorSize::test[4-8-4000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_qgzero.py::TesthpZeroConfigSweep::test[20-2000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_qgzero.py::TesthpZeroConfigSweep::test[8-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_qgzero.py::TesthpZeroConfigSweep::test[20-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_qgzero.py::TesthpZeroConfigSweep::test[8-2000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_qwzero.py::TesthpZeroConfigSweep::test[8-2048]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_qwzero.py::TesthpZeroConfigSweep::test[20-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_qwzero.py::TesthpZeroConfigSweep::test[8-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_qwzero.py::TesthpZeroConfigSweep::test[20-2048]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_not_load_optimizer_state[2]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_optimizer_state[1]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_not_load_optimizer_state[1]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_module_only[1]": + "float16/half is not supported on Gaudi.", + 
"unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_optimizer_state[4]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_not_load_optimizer_state[4]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_module_only[2]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_optimizer_state[2]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_module_only[4]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[facebook/opt-350m-False]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestLowCpuMemUsage::test[gpt2-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-4-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-4-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-4-1024]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-255-1]": + "float16/half is not supported on Gaudi.", + 
"unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-255-2]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestAutoTP::test[falcon-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe[4]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe.py::TestPRMoE::test[2-True]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe.py::TestPRMoE::test[2-False]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-False-1-2]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-True-2-2]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-False-2-2]": + "Xfail, FP16 not supported.", + 
"unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-True-2-2]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-False-1-4]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-True-1-2]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-False-1-4]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-False-1-2]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-False-2-2]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-True-1-4]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-True-1-4]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-True-1-2]": + "Xfail, FP16 not supported.", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_subclass_param": + "Xfail, due to FP16 not supported.", + "unit/runtime/zero/test_zero_context_ancestry.py::TestSerialParamInit::test_subclass_param_init": + "Xfail, due to FP16 not supported.", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-j-False]": + "Xfail, due to SW-162660.", + "unit/inference/test_inference.py::TestMPSize::test[bf16-gpt-neo-False]": + "Xfail, due to SW-175376.", + "unit/inference/test_inference.py::TestMPSize::test[bf16-gpt-j-False]": + "Xfail, due to SW-162660.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-256-52-4-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2048-128-32-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-128-128-2-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True0]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True1]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2560-128-40-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-4096-128-64-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-120-16-3-True-True]": + "CUDA tests not supported by HPU", + 
"unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-512-16-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-53-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1536-128-24-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-160-128-2-24-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-8192-128-64-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-511-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[1-256-2048-32-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[3-1024-54-16-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-21-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-2-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-2-3-True-True-0.05]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-160-128-2-3-True-True-0.1]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-25-3-True-True-0.05]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-160-128-2-24-False-True-0.2]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-1600-128-2-4-False-True-0.2]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=1]": + "Xfail, due to FP16 not supported.", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=2]": + "Xfail, due to FP16 not supported.", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[facebook/opt-1.3b-bsz=1]": + "Xfail, due to FP16 not supported.", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[facebook/opt-1.3b-bsz=2]": + "Xfail, due to FP16 not supported.", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[EleutherAI/gpt-neo-1.3B-bsz=1]": + "Xfail, due to FP16 not supported.", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[EleutherAI/gpt-neo-1.3B-bsz=2]": + "Xfail, due to FP16 not supported.", 
+ "unit/inference/test_stable_diffusion.py::TestStableDiffusion::test": + "Xfail, due to SW-170181.", + "unit/runtime/zero/test_zero_offloadpp.py::TestZeroPartialOffloadConfigSweep::test[8-1024]": + "Xfail, due to FP16 not supported.", + "unit/runtime/zero/test_zero_offloadpp.py::TestZeroPartialOffloadConfigSweep::test[4-1024]": + "Xfail, due to FP16 not supported.", + "unit/compression/test_dequantization.py::TestDequantization::test_dequantize": + "Xfail, due to SW-168442.", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[8-fp16]": + "Xfail, due to Gaudi1 does not support FP16.", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[16-fp16]": + "Xfail, due to Gaudi1 does not support FP16.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-3-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-local-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-3-local-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestEmptyParameterGroup::test_empty_param_groups[dtype1-True-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestEmptyParameterGroup::test_empty_param_groups[dtype1-True-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestEmptyParameterGroup::test_empty_param_groups[dtype1-False-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestEmptyParameterGroup::test_empty_param_groups[dtype1-False-False]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_model_quantization[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_half_int8_quantization": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_nvme_offload": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_nvme_offload": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_half_int4_quantization": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-1]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-0]": + "float16/half is not supported on Gaudi.", + 
"unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-1]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-0]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_model_quantization[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[4bits]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-1-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-1-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-2-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-2-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True]": + "Xfail, FP16 not supported.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False]": + "Xfail, FP16 not supported.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False]": + "Xfail, FP16 not supported.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True]": + "Xfail, FP16 not supported.", + "unit/runtime/half_precision/test_bf16.py::TestZeroDtypeCocktail::test[default-fp16]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_wrapper.py::TestCustomMethod::test_custom_function": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-1-dtype1]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-1-dtype1]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-3-dtype1]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-2-dtype1]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype1]": + "Fp16 not supported by Gaudi1", + 
"unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-2-dtype1]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype1]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-2-dtype1]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_set_compiler_fn": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile_kwargs": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile_disabled": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_custom_backend": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_set_compile_kwargs": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype0]": + "Not supported on Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype2]": + "Not supported on Gaudi1", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config1]": + " Comm Init Rank Error.", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config2]": + " Comm Init Rank Error.", + "unit/utils/test_init_on_device.py::TestOnDevice::test_on_device[hpu]": + "Xfail, due to SW-178730.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[2-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[1-False]": + "Fp16 not supported by Gaudi1", + "unit/moe/test_moe.py::TestMoE::test[True-0-4]": + "Xfail, due to FP16 not supported", + "unit/moe/test_moe.py::TestMoE::test[False-0-2]": + "Xfail, due to FP16 not supported.", + "unit/moe/test_moe.py::TestMoE::test[True-0-2]": + "Xfail, due to FP16 not supported.", + "unit/moe/test_moe.py::TestMoE::test[False-0-4]": + "Xfail, due to FP16 not supported.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-False-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-True]": + "Xfail, due to SW-179864.", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-False-Adam-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[3-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[3-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_shared_weights.py::TestCheckpointSharedWeights::test_checkpoint_shared_weights[True]": + "Xfail, due to SW-179861.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[0-True]": + "Xfail, due to SW-179868.", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[1-True]": + "Xfail, due to SW-179868.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-1-True]": + "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-2-True]": + "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-2-True]": + "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-1-True]": + "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[2-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[1-True]": + "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[2-True]": + "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[1-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_pipeline_checkpoint_loading[3-True]": + "Fp16 not supported by Gaudi1.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-True-True]": + "Fp16 not supported by Gaudi1", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-False-False]": + "Fp16 not 
supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-True-False]": + "Fp16 not supported by Gaudi1", + "unit/runtime/comm/test_coalesced_collectives.py::TestReduceScatterCoalescedTensorSmallerThanWorldSize::test": + "fp16 is not supported Gaudi.", + "unit/runtime/comm/test_coalesced_collectives.py::TestReduceScatterCoalesced::test_single_input": + "fp16 is not supported Gaudi.", + "unit/runtime/comm/test_coalesced_collectives.py::TestReduceScatterCoalesced::test_two_inputs": + "fp16 is not supported Gaudi.", + "unit/runtime/test_ds_initialize.py::TestNoOptim::test[3]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestGatherUpdate::test": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestScatterGather::test": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_context_ancestry.py::TestDSInitWZinit::test": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-full-True]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-full-True]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-local-True]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-local-True]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-local-False]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-local-False]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-full-False]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-full-False]": + "fp16 is not supported Gaudi.", + "unit/runtime/test_data_efficiency.py::TestLegacyCurriculumScheduler::test_fixed_discrete": + "fp16 is not supported Gaudi.", + 
"unit/runtime/test_data_efficiency.py::TestLegacyCurriculumScheduler::test_fixed_linear": + "fp16 is not supported Gaudi.", + "unit/runtime/test_data_efficiency.py::TestDataEfficiency::test_curriculum_learning": + "fp16 is not supported Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestConfigLoad::test_hjson": + "fp16 is not supported Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestConfigLoad::test_dict": + "fp16 is not supported Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestConfigLoad::test_json": + "fp16 is not supported Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestDistInit::test": + "fp16 is not supported Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestDeprecatedDeepScaleConfig::test": + "fp16 is not supported Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestArgs::test_none_args": + "fp16 is not supported Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestArgs::test_no_args": + "fp16 is not supported Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestInitNoOptimizer::test": + "fp16 is not supported Gaudi.", + "unit/runtime/test_ds_initialize.py::TestNoOptim::test[0]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_ignore_unused_parameters.py::TestStage2IgnoreUnusedParameters::test[True]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_ignore_unused_parameters.py::TestStage2IgnoreUnusedParameters::test[False]": + "fp16 is not supported Gaudi.", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fused_optimizer[False]": + "fp16 is not supported Gaudi.", + "unit/runtime/test_pld.py::TestNonPLDModel::test_non_pld_model": + "fp16 is not supported Gaudi.", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0.1]": + "fp16 is not supported Gaudi.", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[1.0]": + "fp16 is not supported Gaudi.", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0.9]": + "fp16 is not supported Gaudi.", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_ext_param_getattr": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_context_return.py::TestReturnParam::test_stage_3_output_type[dict]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_context_return.py::TestReturnParam::test_ext_param_return": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_context_return.py::TestReturnParam::test_stage_3_output_type[tensor]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_context_return.py::TestReturnParam::test_stage_3_output_type[None]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-1-full-False]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-1-full-False]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-2-full-False]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-2-full-True]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-1-full-True]": + "fp16 is not supported Gaudi.", + 
"unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-1-full-True]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-2-full-True]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-2-full-False]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-facebook/opt-350m-zero_stage=2-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-EleutherAI/gpt-neo-125m-zero_stage=3-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-EleutherAI/gpt-neo-125m-zero_stage=2-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-facebook/opt-350m-zero_stage=3-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-facebook/opt-350m-zero_stage=2-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-bigscience/bloom-560m-zero_stage=2-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-EleutherAI/gpt-neo-125m-zero_stage=3-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-facebook/opt-350m-zero_stage=3-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-EleutherAI/gpt-neo-125m-zero_stage=2-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-bigscience/bloom-560m-zero_stage=2-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-bigscience/bloom-560m-zero_stage=3-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-bigscience/bloom-560m-zero_stage=3-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fused_optimizer[True]": + "fp16 is not supported Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[EleutherAI/gpt-j-6B-False]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[EleutherAI/gpt-neo-125M-False]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[bigscience/bloom-560m]-False": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[facebook/opt-125m-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_autocast.py::TestAutoCastDisable::test_missing_amp_autocast[True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-False-False]": + "float16/half is not supported on Gaudi.", + 
"unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3InitForParentWeightInitialization::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningManyParams::test[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroPartitionCache::test_training_partition_cache[False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroPartitionCache::test_training_partition_cache[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3DictFwd::test[list]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3DictFwd::test[tuple]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3DictFwd::test[dict]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningLargeParam::test[True-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningLargeParam::test[False-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroFrozenWeights::test[3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_scatter_halftype": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-2-False]": + "float16/half is not 
supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[1-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[0-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[1-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[0-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-False-False]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-2-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-2-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-1-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-2-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-1-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-2-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-1-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-1-4]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[1-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestPartitionNcclAlignment::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningManyParams::test[False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroOffloadStage1::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[False-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[True-3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[True-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[False-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[True-2]": + "float16/half is not supported 
on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[True-3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[False-3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[False-3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestIncorectAllgatherBucketSize::test[1001]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestIncorectAllgatherBucketSize::test[1000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningLargeParam::test[True-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningLargeParam::test[False-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3RepeatForwardLoop::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroFrozenWeights::test[2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroFrozenWeights::test[1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningBase::test_fp16_enabled[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroOffloadOptim::test[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroOffloadOptim::test[False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[0-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3-False]": + 
"float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[0-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[0-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[0-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-True-deepspeed_adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[1-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[1-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-True-deepspeed_adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[1-False-Adam-True]": + "float16/half is not 
supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[1-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[0-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-True-deepspeed_adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-True-deepspeed_adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[0-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[0-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[0-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[1-True]": + "float16/half is not supported 
on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[1-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[0-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[0-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[1-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[3-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[1-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[2-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[4-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[1-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[4-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[2-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-bf16-noCG-noTriton-False-False]": + "Xfail due to SW-182748", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/accelerator/test_accelerator.py::test_abstract_methods_defined[deepspeed.accelerator.xpu_accelerator]": + "Xfail due to SW-182749", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_non_divisible": + "float16/half is not supported on Gaudi.", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_1d_tensor": + "float16/half is not supported on Gaudi.", + "unit/utils/test_init_on_device.py::TestOnDevice::test_on_device[hpu:0]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestParamPartitioningSkipInit::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3RepeatForwardLoop::test[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3RepeatForwardLoop::test[False]": + "float16/half is not supported on Gaudi.", + 
"unit/runtime/zero/test_zero_leaf_module.py::TestSetZ3LeafModule::test_no_grad_input_error": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_leaf_module.py::TestSetZ3LeafModule::test_choose_module_by_counter": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_leaf_module.py::TestSetZ3LeafModule::test_choose_module_by_rank": + "float16/half is not supported on Gaudi.", + "unit/launcher/test_user_args.py::test_user_args[True-I'm going to tell them \"DeepSpeed is the best\"]": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'\"translate English to Romanian: \"']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'I am 72\" tall']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-\"I am 6' tall\"]": + "Xfail due to SW-182753", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl]": + "xfail due to model download", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestMPSize::test[fp16-gpt-neo-False]": + "Xfail due to FP16 not supported on gaudi", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom-False]": + "Xfail due to FP16 not supported on gaudi", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-False-False-1-dtype1]": + "Xfail due to fp16 not 
supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-False-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-True-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-True-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-False-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-False-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-True-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-True-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-False-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-False-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[bigscience/bloom-560m-False]": + "Xfail due to FP16 not supported", + "unit/moe/test_moe.py::TestSimpleMoE::test[2]": + "Xfail due to fp16 not supported", + "unit/moe/test_moe.py::TestSimpleMoE::test[1]": + "Xfail due to fp16 not supported", + "unit/moe/test_moe.py::TestSimpleMoE::test[0]": + "Xfail due to fp16 not supported", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[fp16-marian-True-False]": + "Xfail due to fp16 not supported", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[fp16-marian-True-False]": + "Xfail due to fp16 not supported", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[fp16-marian-False-False]": + "Xfail due to fp16 not supported", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[fp16-marian-False-False]": + "Xfail due to fp16 not supported", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[bf16-marian-True-False]": + "Xfail due to SW-205776", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[bf16-marian-True-False]": + "Xfail due to SW-205776", +} + +g2_lazy_xfail_tests = { + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + 
"unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail, 
due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + 
"unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + 
"Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163095.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163095.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163095.", + 
"unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-fp16-noCG-noTriton-True-False]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-fp32-CG-noTriton-True-False]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-bf16-noCG-noTriton-True-False]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-bf16-CG-noTriton-True-False]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[gpt2-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-fp16-CG-noTriton-True-False]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-False]": # noqa: F601 + "Xfail, due to SW-163097.", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-True]": # noqa: F601 + "Xfail, due to SW-163097.", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False]": # noqa: F601 + "Xfail, due to SW-163097.", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-True]": # noqa: F601 + "Xfail, due to SW-163097.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-512-16-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-2-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-120-16-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-256-52-4-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-8192-128-64-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-53-16-3-False-True]": + 
"CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-160-128-2-24-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-128-128-2-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-4096-128-64-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1536-128-24-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True0]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2048-128-32-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2560-128-40-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[1-256-2048-32-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-21-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True1]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[3-1024-54-16-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-511-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-2-3-True-True-0.05]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-1600-128-2-4-False-True-0.2]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-160-128-2-3-True-True-0.1]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-25-3-True-True-0.05]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-160-128-2-24-False-True-0.2]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-True-True]": + "CUDA tests not supported by HPU", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-j-False]": + "Xfail, due to SW-162660.", + 
"unit/inference/test_inference.py::TestMPSize::test[bf16-gpt-neo-False]": + "Xfail, due to SW-.", + "unit/inference/test_inference.py::TestMPSize::test[bf16-gpt-j-False]": + "Xfail, due to SW-162660.", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom-False]": + "Xfail, due to SW-.", + "unit/inference/test_inference.py::TestMPSize::test[fp16-gpt-j-False]": + "Xfail, due to SW-162660.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-4-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-4-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-9-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-9-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-4-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-9-1024]": + "Xfail, due to SW-164239.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[22-fp16]": + "Xfail, due to SW-162575.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1048576-fp16]": + "Xfail, due to SW-162575.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1024-fp16]": + "Xfail, due to SW-162575.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[128-fp16]": + "Xfail, due to SW-162575.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[64-fp16]": + "Xfail, due to SW-162575.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_post_init_quant_cpu_offload": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_quantized_initialization": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_post_init_quant": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_quantized_initialization_cpu_offload": + "Xfail, due to SW-162660.", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_subclass_param": + "Xfail, due to SW-156783.", + "unit/runtime/zero/test_zero_context_ancestry.py::TestSerialParamInit::test_subclass_param_init": + "Xfail, due to SW-143227.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-1-dtype1]": + "Xfail, due to SW-145262.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-dtype1]": + "Xfail, due to SW-145262.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-2-dtype1]": + "Xfail, due to SW-145262.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_quantized_initialization_nvme_offload": + "Xfail, due to SW-164545.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_post_init_quant_nvme_offload": + "Xfail, due to SW-164545.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuSGD-SGD]": + "Xfail, due to SW-164551.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuSGD-SGD]": + "Xfail, due to SW-164551.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuAdam-Adam]": + "Xfail, due to SW-164551.", + 
"unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuAdam-Adam]": + "Xfail, due to SW-164551.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuAdamW-AdamW]": + "Xfail, due to SW-164551.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuAdamW-AdamW]": + "Xfail, due to SW-164551.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_optimizer_state[4]": + "Xfail, due to SW-164577.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_not_load_optimizer_state[4]": + "Xfail, due to SW-164577.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_not_load_optimizer_state[1]": + "Xfail, due to SW-164577.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_optimizer_state[2]": + "Xfail, due to SW-164577.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_module_only[4]": + "Xfail, due to SW-164577.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_module_only[1]": + "Xfail, due to SW-164577.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_not_load_optimizer_state[2]": + "Xfail, due to SW-164577.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_optimizer_state[1]": + "Xfail, due to SW-164577.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_module_only[2]": + "Xfail, due to SW-164577.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-3-dtype1]": + "Xfail, due to SW-164593.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_quantized_linear": + "Xfail, due to SW-164606.", + "unit/runtime/zero/test_qwzero.py::TesthpZeroConfigSweep::test[20-1024]": + "Xfail, due to SW-156782.", + "unit/runtime/zero/test_qwzero.py::TesthpZeroConfigSweep::test[20-2048]": + "Xfail, due to SW-156782.", + "unit/runtime/zero/test_qwzero.py::TesthpZeroConfigSweep::test[8-2048]": + "Xfail, due to SW-156782.", + "unit/runtime/zero/test_qwzero.py::TesthpZeroConfigSweep::test[8-1024]": + "Xfail, due to SW-156782.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp32-noCG-noTriton-False-False]": + "Xfail, due to SW-163098.", + "unit/inference/test_stable_diffusion.py::TestStableDiffusion::test": # noqa: F601 + "Xfail, due to SW-170181.", + "unit/compression/test_dequantization.py::TestDequantization::test_dequantize": # noqa: F601 + "Xfail, due to SW-168442.", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[8-fp16]": # noqa: F601 + "Xfail, due to SW-162575.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-0]": + "Xfail, due to SW-168583.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-1]": + "Xfail, due to SW-168583.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-1]": + "Xfail, due to SW-168583.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-0]": + "Xfail, due to SW-168583.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[4bits]": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[4bits]": + "Xfail, due to SW-162660.", + 
"unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[8bits]": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[4bits]": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[8bits]": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[4bits]": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[8bits]": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[8bits]": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_nvme_offload": + "Xfail, due to SW-164545.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_nvme_offload": + "Xfail, due to SW-164545.", + "unit/ops/lion/test_lion.py::TestLionConfigs::test[Lion-True-DeepSpeedCPULion]": + "skipping due to HPU is not supported FusedLion, SW-176903", + "unit/ops/lion/test_lion.py::TestLionConfigs::test[Lion-False-FusedLion]": + "skipping due to HPU is not supported FusedLion, SW-176903", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-3-1024-512-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-3-1024-512-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-128-128-2-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1536-128-24-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-509-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-24-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2048-128-32-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-381-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-56-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + 
"unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[3-1024-51-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-119-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-512-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2560-128-40-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/utils/test_init_on_device.py::TestOnDevice::test_on_device[hpu]" : "Xfail, due to SW-178730.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-False-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-False-Adam-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[3-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[3-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-False-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-True-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-True-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-False-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_shared_weights.py::TestCheckpointSharedWeights::test_checkpoint_shared_weights[True]" : "Xfail, due to SW-179861.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False-True]" : "Xfail, due to SW-179867.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False-True]" : "Xfail, due to SW-179867.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True-True]" : 
"Xfail, due to SW-179867.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True-True]" : "Xfail, due to SW-179867.", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[0-True]" : "Xfail, due to SW-179868.", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[1-True]" : "Xfail, due to SW-179868.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-1-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-2-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-2-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-1-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[2-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[1-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[2-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[1-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_pipeline_checkpoint_loading[3-True]" : "Xfail, due to SW-175716.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-3-dtype1]" : "Xfail, due to SW-175716.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype2]" : "Xfail, due to SW-175716.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype1]" : "Xfail, due to SW-175716.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype0]" : "Xfail, due to SW-175716.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype1]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-True-True]" : "Xfail, due to SW-180488.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True-True]" : "Xfail, due to SW-180488.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-False-True]" : "Xfail, due to SW-180488.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-False-True]" : "Xfail, due to SW-180488.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-True-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-False-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-False-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-True-True]" : "Xfail, due to SW-179873.", + 
"unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fused_optimizer[True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fp32_optimizer[True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-False-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-True-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-False-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-True-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-True-deepspeed_adam-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-True-deepspeed_adam-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-False-Adam-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[2-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[1-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[2-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[1-False-Adam-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[1-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-False-Adam-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[1-False-Adam-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[0-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[2-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[2-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[2-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[1-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[1-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[1-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[2-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[0-True]" : "Xfail, due to SW-180868.", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[1-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[3-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[1-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[2-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[0-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest[True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[0-False-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[1-False-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[1-False-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-True-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[0-False-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-False-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-True-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-False-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-True]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-False-Adam-True]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[3-True]" : "Xfail, due to SW-175716.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-bf16-noCG-noTriton-False-False]":"Xfail due to SW-182748", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-bf16-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-bf16-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-True-False]":"xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-bf16-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-bf16-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-bf16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-bf16-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-bf16-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-bf16-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-bf16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-False]":"xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-bf16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-bf16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-bf16-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-bf16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton-True-False]":"xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-bf16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-bf16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-bf16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-bf16-noCG-noTriton-True-False]":"Xfail due to SW-181935", + "unit/accelerator/test_accelerator.py::test_abstract_methods_defined[deepspeed.accelerator.xpu_accelerator]":"Xfail due to SW-182749", + "unit/launcher/test_user_args.py::test_user_args[True-I'm going to tell them \"DeepSpeed is the best\"]":"Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'\"translate English to Romanian: \"']":"Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'I am 72\" tall']":"Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-\"I am 6' tall\"]":"Xfail due to SW-182753", + "unit/runtime/zero/test_zero.py::TestParamPartitioningSkipInit::test":"Xfail due to SW-", + "unit/inference/test_human_eval.py::test_human_eval[codellama/CodeLlama-7b-Python-hf]":"Xfail due to SW-182759", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_1d_tensor":"Xfail due to SW-182766", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_non_divisible":"Xfail due to SW-182766", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl]":"xfail due to model download", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-bf16-noCG-noTriton-True-False]":"Xfail due to SW-181935", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton-False-False]": "xfail due to SW-184834", + 
"unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-noCG-noTriton-False-False]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton-False-False]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-noCG-noTriton-False-False]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-CG-noTriton-False-False]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-CG-noTriton-False-False]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-CG-noTriton-False-False]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-noCG-noTriton-False-False]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-noCG-noTriton-False-False]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-CG-noTriton-False-False]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-noCG-noTriton-False-False]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG-noTriton-False-False]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-CG-noTriton-False-False]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-CG-noTriton-False-False]": "xfail due to SW-184834", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=2]":" xfail due to SW-185015", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=1]":" xfail due to SW-185015", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_half_int8_quantization":"Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_half_int4_quantization":"Xfail due to SW-182766", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[False-3]":"Xfail due to sw-201549", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[True-3]":"Xfail due to sw-201549", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[True-2]":"Xfail due to sw-201549", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[False-3]":"Xfail due to sw-201549", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[True-2]":"Xfail due to sw-201549", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[True-3]":"Xfail due to sw-201549", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-False-Adam-False]":"Xfail due to sw-201549", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-False]":"Xfail due to sw-201549", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[3-False]":"Xfail due to sw-201549", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_pipeline_checkpoint_loading[3-False]":"Xfail due to sw-201549", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3-False]":"Xfail due to sw-201549", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3-False]":"Xfail due to sw-201549", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[3-False]":"Xfail due to sw-201549", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3-False]":"Xfail due to sw-201549", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-False]":"Xfail due to sw-201549", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-False-False]":"Xfail due to sw-201549", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-False]":"Xfail due to sw-201549", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-False":"Xfail due to sw-201549", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[16-fp16]":"Xfail due to SW-200127", + "unit/linear/test_linear.py::TestQuantLinear::test[6]": "AttributeError: 'Parameter' object has no attribute 'dequantized'", + "unit/linear/test_linear.py::TestQuantLinear::test[8]": "AttributeError: 'Parameter' object has no attribute 'dequantized'", + "unit/linear/test_linear.py::TestOptimizedLinear::test[qbit8-bws1]": "AttributeError: 'Parameter' object has no attribute 'dequantized'", + "unit/linear/test_linear.py::TestOptimizedLinear::test[qbit6-bws1]": "AttributeError: 'Parameter' object has no attribute 'dequantized'", + "unit/linear/test_quant_param.py::TestQuantParam::test_hf_clone": "AssertionError: Quantize fallback only supports quantization to FP8", + "unit/linear/test_ctx.py::TestEngine::test_model": "Xfail due to SW-209267", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[bf16-marian-True-False]":"Xfail due to SW-205776", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[fp16-marian-True-False]":"Xfail due to SW-205776", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[fp16-marian-True-False]":"Xfail due to SW-205776", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[bf16-marian-True-False]":"Xfail due to SW-205776", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-False]":"Xfail due to SW-201549", +} + +g3_lazy_xfail_tests = { + "unit/accelerator/test_accelerator.py::test_abstract_methods_defined[deepspeed.accelerator.xpu_accelerator]": + "Xfail due to SW-182749", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-1-dtype2]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-1-dtype1]": + "Xfail due to SW-187590", + 
"unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-True-1-dtype2]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-1-dtype2]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-1-dtype0]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-True-1-dtype0]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-1-dtype0]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-True-1-dtype0]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-1-dtype0]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-True-1-dtype1]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-1-dtype2]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-1-dtype0]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-1-dtype1]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-1-dtype2]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-1-dtype1]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-True-1-dtype2]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-1-dtype1]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-True-1-dtype1]": + "Xfail due to SW-187590", + "unit/compression/test_dequantization.py::TestDequantization::test_dequantize": + "Xfail due to SW-168442", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=2]": + "xfail due to SW-185015", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=1]": + "xfail due to SW-185015", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-0]": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_nvme_offload": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-1]": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-0]": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[8bits]": + "Xfail, 
due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[4bits]": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[4bits]": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[4bits]": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_nvme_offload": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[8bits]": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[4bits]": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[8bits]": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[8bits]": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-1]": + "Xfail, due to SW-168583", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-True]": + "Xfail, due to SW-163097", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False]": + "Xfail, due to SW-163097", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-True]": + "Xfail, due to SW-163097", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-False]": + "Xfail, due to SW-163097", + "unit/inference/test_stable_diffusion.py::TestStableDiffusion::test": + "Xfail, due to SW-170181.", + "unit/launcher/test_user_args.py::test_user_args[True-I'm going to tell them \"DeepSpeed is the best\"]": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'\"translate English to Romanian: \"']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'I am 72\" tall']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-\"I am 6' tall\"]": + "Xfail due to SW-182753", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-False-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-512-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-56-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2048-128-32-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-160-128-2-24-False-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-511-16-3-False-True]": + "skipping due to TransformerBuilder is not supported 
by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-False-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-509-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-119-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[3-1024-51-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True0]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-256-52-4-3-True-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1536-128-24-3-False-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2560-128-40-3-False-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-2-3-True-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-4096-128-64-3-True-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-24-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-True-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-False-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-381-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[3-1024-54-16-3-True-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2048-128-32-3-False-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-True-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True1]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-53-16-3-False-True]": + "skipping due to 
TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-120-16-3-True-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-128-128-2-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-512-16-3-True-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2560-128-40-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-8192-128-64-3-False-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[1-256-2048-32-3-True-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1536-128-24-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-128-128-2-3-True-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-21-16-3-False-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-True-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-False-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-3-1024-512-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-3-1024-512-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-1600-128-2-4-False-True-0.2]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-2-3-True-True-0.05]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-160-128-2-24-False-True-0.2]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-25-3-True-True-0.05]": + "skipping due to 
TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-160-128-2-3-True-True-0.1]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[64-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[128-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[22-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1048576-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1024-fp16]": + "Xfail, due to SW-182502", + "unit/ops/lion/test_lion.py::TestLionConfigs::test[Lion-False-FusedLion]": + "Xfail, due to SW-176903", + "unit/ops/lion/test_lion.py::TestLionConfigs::test[Lion-True-DeepSpeedCPULion]": + "Xfail, due to SW-176903", + "unit/ops/transformer/inference/test_bias_geglu.py::test_bias_geglu[dtype1-512-1-1]": + "Xfail flaky", + "unit/ops/transformer/inference/test_bias_geglu.py::test_gated_silu[dtype0-512-1-1]": + "Xfail flaky", + "unit/runtime/zero/test_zero.py::TestParamPartitioningSkipInit::test": + "Xfail due to SW-181939", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-9-1024]": + "Xfail, due to SW-164239", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-9-1024]": + "Xfail, due to SW-164239", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-9-1024]": + "Xfail, due to SW-164239", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConvergence::test[gpt2]": + "XFail for now", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_subclass_param": + "Xfail, due to SW-156783", + "unit/runtime/zero/test_zero_context_ancestry.py::TestSerialParamInit::test_subclass_param_init": + "Xfail, due to SW-143227.", + "unit/runtime/zero/test_zero_nesting_init.py::TestNestedParallelInit::test_nested_parallel_init": + "Xfail download issue", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_non_divisible": + "Xfail due to SW-182766", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_1d_tensor": + "Xfail due to SW-182766", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuAdam-Adam]": + "Xfail, due to SW-164551", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuAdam-Adam]": + "Xfail, due to SW-164551", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuSGD-SGD]": + "Xfail, due to SW-164551", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuAdamW-AdamW]": + "Xfail, due to SW-164551", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuSGD-SGD]": + "Xfail, due to SW-164551", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuAdamW-AdamW]": + "Xfail, due to SW-164551", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom]": + "Xfail due to RuntimeError: Incompatible input shapes, broadcast not possible. 
Tensor1 Size: 5 5 16 1 Tensor2 Size: 5 1 8During handling of the above exception, another exception occurred", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + 
"unit/inference/test_human_eval.py::test_human_eval[codellama/CodeLlama-7b-Python-hf]": + "Xfail due to SW-182759", + "unit/utils/test_init_on_device.py::TestOnDevice::test_on_device[hpu]": + "Xfail, due to SW-178730.", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom-False]": + "Xfail due to SW-188513", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config1]": + "xfail due to SW-194902", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_half_int8_quantization": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_half_int4_quantization": + "Xfail due to SW-182766", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[False-3]": + "Xfail due to sw-201549", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[True-3]": + "Xfail due to sw-201549", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[True-2]": + "Xfail due to sw-201549", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[False-3]": + "Xfail due to sw-201549", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[True-2]": + "Xfail due to sw-201549", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[True-3]": + "Xfail due to sw-201549", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-False-Adam-False]": + "Xfail due to sw-201549", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-False]": + "Xfail due to sw-201549", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[3-False]": + "Xfail due to sw-201549", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_pipeline_checkpoint_loading[3-False]": + "Xfail due to sw-201549", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3-False]": + "Xfail due to sw-201549", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3-False]": + "Xfail due to sw-201549", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[3-False]": + "Xfail due to sw-201549", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3-False]": + "Xfail due to sw-201549", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-False]": + "Xfail due to sw-201549", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-False-False]": + "Xfail due to sw-201549", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-False]": + "Xfail due to sw-201549", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-False": + "Xfail due to sw-201549", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[16-fp16]": + "Xfail due to SW-200127", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config2]": + "Xfail due to SW-203893", + "unit/linear/test_linear.py::TestQuantLinear::test[6]": + "AttributeError: 'Parameter' object has no attribute 'dequantized'", + "unit/linear/test_linear.py::TestQuantLinear::test[8]": + "AttributeError: 'Parameter' object has no attribute 'dequantized'", + 
"unit/linear/test_linear.py::TestOptimizedLinear::test[qbit8-bws1]": + "AttributeError: 'Parameter' object has no attribute 'dequantized'", + "unit/linear/test_linear.py::TestOptimizedLinear::test[qbit6-bws1]": + "AttributeError: 'Parameter' object has no attribute 'dequantized'", + "unit/linear/test_quant_param.py::TestQuantParam::test_hf_clone": + "AssertionError: Quantize fallback only supports quantization to FP8", + "unit/linear/test_ctx.py::TestEngine::test_model": + "Xfail due to SW-209267", +} + +hpu_eager_xfail_tests = {} + +g1_eager_xfail_tests = { + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + 
"unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_stable_diffusion.py::TestStableDiffusion::test": + "Xfail, due to SW-170181.", + "unit/runtime/test_autocast.py::TestAutoCastDisable::test_missing_amp_autocast[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/comm/test_coalesced_collectives.py::TestReduceScatterCoalescedTensorSmallerThanWorldSize::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/comm/test_coalesced_collectives.py::TestReduceScatterCoalesced::test_two_inputs": + "float16/half is not supported on Gaudi.", + "unit/runtime/comm/test_coalesced_collectives.py::TestReduceScatterCoalesced::test_single_input": + "float16/half is not supported on Gaudi.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[64-fp16]": + "float16/half is not supported on Gaudi.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1024-fp16]": + "float16/half is not supported on Gaudi.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[22-fp16]": + "float16/half is not supported on Gaudi.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1048576-fp16]": + "float16/half is not supported on Gaudi.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[128-fp16]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_initialize.py::TestNoOptim::test[3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp16-fp32-zero3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp16-bf16-zero3]": + "float16/half is not supported on Gaudi.", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[8-fp16]": + "float16/half is not supported on Gaudi.", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[16-fp16]": + "float16/half is not supported on Gaudi.", + "unit/utils/test_init_on_device.py::TestOnDevice::test_on_device[hpu]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-1]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_half_int8_quantization": + 
"float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_model_quantization[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_model_quantization[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_half_int4_quantization": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_nvme_offload": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-1]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_nvme_offload": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-0]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-0]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroFrozenWeights::test[3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3DictFwd::test[dict]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3DictFwd::test[tuple]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3DictFwd::test[list]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3InitForParentWeightInitialization::test": + "float16/half is not supported on 
Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningLargeParam::test[False-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningLargeParam::test[True-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningManyParams::test[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroPartitionCache::test_training_partition_cache[False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroPartitionCache::test_training_partition_cache[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestScatterGather::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestGatherUpdate::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_scatter_halftype": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_subclass_param": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context_ancestry.py::TestDSInitWZinit::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context_ancestry.py::TestSerialParamInit::test_subclass_param_init": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_pipeline_checkpoint_loading[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-False]": + "float16/half is not supported on Gaudi.", + 
"unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-full-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-local-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-local-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-full-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-local-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-full-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-local-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-full-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-local-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-3-local-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-3-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/half_precision/test_bf16.py::TestZeroDtypeCocktail::test[default-fp16]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_data_efficiency.py::TestDataEfficiency::test_curriculum_learning": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_data_efficiency.py::TestLegacyCurriculumScheduler::test_fixed_linear": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_data_efficiency.py::TestLegacyCurriculumScheduler::test_fixed_discrete": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestDeprecatedDeepScaleConfig::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestDistInit::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestConfigLoad::test_hjson": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestConfigLoad::test_json": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestConfigLoad::test_dict": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestArgs::test_no_args": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestArgs::test_none_args": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestInitNoOptimizer::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_initialize.py::TestNoOptim::test[0]": + "float16/half is not supported on Gaudi.", + 
"unit/runtime/zero/test_ignore_unused_parameters.py::TestStage2IgnoreUnusedParameters::test[False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_ignore_unused_parameters.py::TestStage2IgnoreUnusedParameters::test[True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[0-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[1-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[0-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[1-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-True-False]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-0-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-0-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-0-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-1-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-1-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-2-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-1-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-1-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-0-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-2-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-2-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-2-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestPRMoE::test[2-False]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestPRMoE::test[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False-False]": + "float16/half is not supported on Gaudi.", + 
"unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe[4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-False-1-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-False-1-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-False-2-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-False-1-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-True-2-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-True-1-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-True-1-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-False-1-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-True-2-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-True-1-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-True-1-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-False-2-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuAdamW-AdamW]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuSGD-SGD]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuSGD-SGD]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuAdamW-AdamW]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuAdam-Adam]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuAdam-Adam]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fused_optimizer[False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[1-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_pld.py::TestNonPLDModel::test_non_pld_model": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0.1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0.9]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[1.0]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestPartitionNcclAlignment::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroOffloadOptim::test[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroOffloadOptim::test[False]": + "float16/half is not supported on Gaudi.", + 
"unit/runtime/zero/test_zero.py::TestEmptyParameterGroup::test_empty_param_groups[dtype1-False-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestEmptyParameterGroup::test_empty_param_groups[dtype1-True-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestEmptyParameterGroup::test_empty_param_groups[dtype1-True-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestEmptyParameterGroup::test_empty_param_groups[dtype1-False-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningBase::test_fp16_enabled[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroFrozenWeights::test[2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroFrozenWeights::test[1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3RepeatForwardLoop::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestIncorectAllgatherBucketSize::test[1000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestIncorectAllgatherBucketSize::test[1001]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[True-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[True-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[False-3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[False-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[False-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[True-3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[False-3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[True-3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningLargeParam::test[False-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningLargeParam::test[True-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningManyParams::test[False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroOffloadStage1::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[3]": + "float16/half is not 
supported on Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_ext_param_getattr": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context_return.py::TestReturnParam::test_stage_3_output_type[tensor]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context_return.py::TestReturnParam::test_stage_3_output_type[None]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context_return.py::TestReturnParam::test_stage_3_output_type[dict]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context_return.py::TestReturnParam::test_ext_param_return": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_offloadpp.py::TestZeroPartialOffloadConfigSweep::test[4-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_offloadpp.py::TestZeroPartialOffloadConfigSweep::test[8-1024]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[0-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[2-False]": + "float16/half is not supported on Gaudi.", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[0-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[0-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-True-deepspeed_adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[0-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-True-deepspeed_adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[1-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[1-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-True-False]": + "float16/half is not supported on Gaudi.", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-False-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-1-full-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-2-full-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-2-full-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-2-full-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-1-full-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-1-full-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-2-full-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-1-full-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-1-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-2-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-1-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-2-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-False-False]": + "float16/half is not supported on Gaudi.", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-False-False]": + "float16/half is not supported on Gaudi.", + "unit/compression/test_dequantization.py::TestDequantization::test_dequantize": + "Xfail, due to SW-168442.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_pipeline_checkpoint_loading[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[0-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[1-False-True]": + "float16/half is not supported on 
Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[1-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[0-False-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_set_compile_kwargs": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile_disabled": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_set_compiler_fn": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile_kwargs": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_custom_backend": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fused_optimizer[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_compile_wrapper.py::TestCustomMethod::test_custom_function": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-1-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-2-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-3-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-1-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-2-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype1]": + "float16/half is 
not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[1-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[0-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-True-deepspeed_adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[1-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-True-deepspeed_adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[2-True]": + "float16/half is not supported 
on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[0-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[0-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[0-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-False-True]": + "float16/half is not supported on Gaudi.", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-True-True]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[EleutherAI/gpt-j-6B-False]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[EleutherAI/gpt-neo-125M-False]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[bigscience/bloom-560m-False]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[facebook/opt-125m-False]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[facebook/opt-350m-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-4-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-4-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-4-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-128-2]": + "float16/half is not supported on Gaudi.", + 
"unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-255-2]": + "float16/half is not supported on Gaudi.", + 
"unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[EleutherAI/gpt-neo-1.3B-bsz=2]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[facebook/opt-1.3b-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[facebook/opt-1.3b-bsz=2]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[EleutherAI/gpt-neo-1.3B-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=2]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-bigscience/bloom-560m-zero_stage=2-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-bigscience/bloom-560m-zero_stage=3-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-facebook/opt-350m-zero_stage=3-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-facebook/opt-350m-zero_stage=2-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-EleutherAI/gpt-neo-125m-zero_stage=3-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-facebook/opt-350m-zero_stage=2-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-EleutherAI/gpt-neo-125m-zero_stage=3-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-EleutherAI/gpt-neo-125m-zero_stage=2-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-bigscience/bloom-560m-zero_stage=2-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-bigscience/bloom-560m-zero_stage=3-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-facebook/opt-350m-zero_stage=3-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-EleutherAI/gpt-neo-125m-zero_stage=2-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestMPSize::test[bf16-gpt-neo-False]": + "Xfail, due to SW-175376.", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-neo-False]": + "Xfail, due to SW-175376.", + "unit/launcher/test_user_args.py::test_user_args[True-I'm going to tell them \"DeepSpeed is the best\"]": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'\"translate English to Romanian: \"']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'I am 72\" tall']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-\"I am 6' 
tall\"]": + "Xfail due to SW-182753", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[1-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[4-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[2-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[1-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[2-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[4-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_1d_tensor": + "float16/half is not supported on Gaudi.", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_non_divisible": + "float16/half is not supported on Gaudi.", + "unit/utils/test_init_on_device.py::TestOnDevice::test_on_device[hpu:0]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestParamPartitioningSkipInit::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3RepeatForwardLoop::test[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3RepeatForwardLoop::test[False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_leaf_module.py::TestSetZ3LeafModule::test_choose_module_by_counter": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_leaf_module.py::TestSetZ3LeafModule::test_choose_module_by_rank": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_leaf_module.py::TestSetZ3LeafModule::test_no_grad_input_error": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-182748", + "unit/accelerator/test_accelerator.py::test_abstract_methods_defined[deepspeed.accelerator.xpu_accelerator]": + "Xfail due to SW-182749", + "unit/inference/test_inference.py::TestInjectionPolicy::test[fp32-t5-False]": + "Xfail, due to SW-182668", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-False-False]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp32-noCG-noTriton-False-False]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-bf16-noCG-noTriton-False-False]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-bf16-noCG-noTriton-False-False]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-bf16-noCG-noTriton-False-False]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-bf16-noCG-noTriton-False-False]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG-noTriton-False-False]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-noCG-noTriton-False-False]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-182671", + 
"unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-182671", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config2]": + "Xfail due to SW-182509", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config1]": + "Xfail due to SW-182509", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype2]": + "Xfail due to SW-181951", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype0]": + "Xfail due to SW-181951", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype2]": + "Xfail due to OP not implemented on HPU", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype0]": + "Xfail due to OP not implemented on HPU", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl-False]": + "xfail due to model download", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + 
"unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + 
"unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + 
"unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestMPSize::test[fp16-gpt-neo]": + "Xfail due to FP16 not supported on gaudi", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom]": + "Xfail due to FP16 not supported on gaudi", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-False-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-False-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-True-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-True-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-False-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-False-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-True-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-True-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-False-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-False-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/inference/test_inference.py::TestInjectionPolicy::test[ws2-fp32-t5-False]": + "Xfail, due to SW-.", + "unit/inference/test_inference.py::TestInjectionPolicy::test[ws1-fp32-t5-False]": + "Xfail, due to SW-.", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-False-1-dtype1]": + "Xfail due to fp16 not supported", + 
"unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton-True-True]": + 
"Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-noCG-noTriton-False-True]": + "Xfail due to SW-189257", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-False-True]": + "Xfail due to SW-189257", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-False-True]": + "Xfail due to SW-189257", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom-True]": + "Fp16 not supported", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom-False]": + "FP16 not supported", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[facebook/opt-125m-True]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[EleutherAI/gpt-neo-125M-True]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[EleutherAI/gpt-j-6B-True]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[bigscience/bloom-560m-True]": + 
"float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[facebook/opt-350m-True]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestMPSize::test[fp16-gpt-neo-True]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestMPSize::test[fp16-gpt-neo-False]": + "Xfail due to FP16 not supported", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp32-noCG-noTriton-False-False]": + "Xfail due to sw-182671", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp32-noCG-noTriton-False-True]": + "Xfail due to SW-196571", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp32-noCG-noTriton-False-True]": + "Xfail due to SW-196571", + "unit/inference/test_inference.py::TestInjectionPolicy::test[ws2-fp32-roberta-True]": + "Xfail due to sw-193404", + "unit/inference/test_inference.py::TestInjectionPolicy::test[ws2-fp32-t5-True]": + "Xfail due to sw-193404", + "unit/inference/test_inference.py::TestInjectionPolicy::test[ws1-fp32-t5-True]": + "xfail due to sw-187946", + "unit/moe/test_moe.py::TestSimpleMoE::test[2]": + "Xfail due to fp16 not supported", + "unit/moe/test_moe.py::TestSimpleMoE::test[1]": + "Xfail due to fp16 not supported", + "unit/moe/test_moe.py::TestSimpleMoE::test[0]": + "Xfail due to fp16 not supported", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-noCG-noTriton-False-True]": + "Xfail due to SW-195011", + "unit/runtime/test_multi_output_model.py::TestThreeOutputModel::test": + "xfail due to 198794", + "unit/runtime/test_multi_output_model.py::TestTwoOutputModel::test": + "xfail due to 198794", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[0-True]": + "xfail due to SW-199012", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG-noTriton-False-True]": + "xfail due to SW-189257", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG-noTriton-True-False]": + "xfail due to SW-182671", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_bf16_fragments[False]": + "Xfail due to SW-201247", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[fp16-marian-True-False]": + "Xfail due to fp16 not supported", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[fp16-marian-True-False]": + "Xfail due to fp16 not supported", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[fp16-marian-False-False]": + "Xfail due to fp16 not supported", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[fp16-marian-False-False]": + "Xfail due to fp16 not supported", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[fp16-marian-True-True]": + "Xfail due to fp16 not supported", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[fp16-marian-False-True]": + "Xfail due to fp16 not supported", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[fp16-marian-True-True]": + "Xfail due to fp16 not supported", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[fp16-marian-False-True]": + "Xfail due to fp16 not supported", + 
"unit/inference/test_inference.py::TestAutoTensorParallelism::test[bf16-marian-False-True]": + "Xfail due to SW-203016", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[bf16-marian-True-True]": + "Xfail due to SW-203016", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[bf16-marian-False-True]": + "Xfail due to SW-203016", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[bf16-marian-True-True]": + "Xfail due to SW-203016", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[bf16-marian-True-False]": + "Xfail due to SW-203016", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[bf16-marian-False-False]": + "Xfail due to SW-203016", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[bf16-marian-True-False]": + "Xfail due to SW-203016", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[bf16-marian-False-False]": + "Xfail due to SW-203016", +} + +g2_eager_xfail_tests = { + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + 
"unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_stable_diffusion.py::TestStableDiffusion::test": + "Xfail, due to SW-170181.", 
+ "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-160-128-2-24-False-True-0.2]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-1600-128-2-4-False-True-0.2]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-2-3-True-True-0.05]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-25-3-True-True-0.05]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-160-128-2-3-True-True-0.1]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-3-1024-512-16-3-False-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-3-1024-512-16-3-True-False]": + "Xfail, due to SW-176905.", + "unit/compression/test_dequantization.py::TestDequantization::test_dequantize": + "Xfail, due to SW-168442.", + "unit/utils/test_init_on_device.py::TestOnDevice::test_on_device[hpu]": + "Xfail, due to SW-178730.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_nvme_offload": + "Xfail, due to SW-164545.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_nvme_offload": + "Xfail, due to SW-164545.", + "unit/ops/lion/test_lion.py::TestLionConfigs::test[Lion-True-DeepSpeedCPULion]": + "Xfail, due to SW-176903.", + "unit/ops/lion/test_lion.py::TestLionConfigs::test[Lion-False-FusedLion]": + "Xfail, due to SW-176903.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-512-16-3-False-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-56-16-3-False-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-119-16-3-True-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-128-128-2-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2560-128-40-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-2-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-509-16-3-True-False]": + "Xfail, due to SW-176905.", + 
"unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-120-16-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[1-256-2048-32-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2560-128-40-3-False-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1536-128-24-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-128-128-2-3-True-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-256-52-4-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-512-16-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-511-16-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[3-1024-54-16-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True0]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True1]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1536-128-24-3-False-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-53-16-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-381-16-3-True-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-160-128-2-24-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-False-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-4096-128-64-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2048-128-32-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-24-16-3-False-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2048-128-32-3-False-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-21-16-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-8192-128-64-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-True-True]": 
+ "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[3-1024-51-16-3-True-False]": + "Xfail, due to SW-176905.", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom-False]": + "Xfail, due to SW-196522", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-True]": + "Xfail, due to SW-163097.", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-True]": + "Xfail, due to SW-163097.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-9-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-9-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-4-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-9-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-4-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-4-1024]": + "Xfail, due to SW-164239.", + "unit/launcher/test_user_args.py::test_user_args[True-I'm going to tell them \"DeepSpeed is the best\"]": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'\"translate English to Romanian: \"']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'I am 72\" tall']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-\"I am 6' tall\"]": + "Xfail due to SW-182753", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-bf16-noCG-noTriton-False-False]": + "Xfail due to SW-182748", + "unit/accelerator/test_accelerator.py::test_abstract_methods_defined[deepspeed.accelerator.xpu_accelerator]": + "Xfail due to SW-182749", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to 
SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_1d_tensor": + "Xfail due to SW-182766", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_non_divisible": + "Xfail due to SW-182766", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-181935", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-181935", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[22-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1024-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[128-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[64-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1048576-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[16-fp16]": + "Xfail, due to SW-182502", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype2]": + "Xfail due to op not been implemented on HPU", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype1]": + "Xfail due to op not been implemented on HPU", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype0]": + "Xfail due to op not been implemented on HPU", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl-False]": + "xfail due to model download", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + 
"unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=2]": + " xfail due to SW-185015", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=1]": + " xfail due to SW-185015", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[8-fp16]": + "Xfail due to SW-182502", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inf.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-noCG-noTriton-False-True]": + "Xfail due to SW-189257", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-False-True]": + "Xfail due to SW-189257", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG-noTriton-False-True]": + "Xfail due to SW-189257", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-False-True]": + "Xfail due to SW-189257", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom-True]": + "Xfail due to 189259", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-1-full-False]": + "Xfail due to SW-187946", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_bf16_fragments[False]": + "Xfail due to SW-187946", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-2-full-False]": + "Xfail due to SW-187946", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-2-full-False]": + "Xfail due to SW-187946", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-1-full-False]": + "Xfail due to SW-187946", + "unit/inference/test_inference.py::TestInjectionPolicy::test[ws2-fp32-roberta-True]": + "Xfail due to sw-193404", + 
"unit/inference/test_inference.py::TestInjectionPolicy::test[ws2-fp32-t5-True]": + "Xfail due to sw-193404", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[8bits]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-1]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_half_int4_quantization": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[8bits]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[4bits]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[8bits]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[4bits]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-1]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[4bits]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_half_int8_quantization": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[4bits]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[8bits]": + "Xfail due to SW-182766", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[0-True]": + "Xfail due to SW-199012", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[1-True]": + "Xfail due to SW-199012", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True-True]": + "xfail due to SW-163097", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True-True]": + "xfail due to sw-201097", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False-True]": + "xfail due to sw-201097", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True-True]": + "xfail due to sw-201097", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False-True]": + "xfail due to sw-201097", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[bf16-marian-True-True]": + "Xfail due to sw-203720", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[bf16-marian-True-True]": + "Xfail due to sw-203720", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[fp16-marian-True-True]": + "Xfail due to sw-203720", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[fp16-marian-True-True]": + "Xfail due to sw-203720", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[bf16-marian-True-False]": + "Xfail due to SW-203720", + 
"unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[fp16-marian-True-False]": + "Xfail due to SW-203720", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[bf16-marian-True-False]": + "Xfail due to SW-203720", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[fp16-marian-True-False]": + "Xfail due to SW-203720", + "unit/linear/test_quant_param.py::TestQuantParam::test_hf_clone": + "AssertionError: Quantize fallback only supports quantization to FP8", + "unit/linear/test_linear.py::TestQuantLinear::test[6]": + "AssertionError: Quantize fallback only supports quantization to FP8", + "unit/linear/test_linear.py::TestOptimizedLinear::test[qbit6-bws2]": + "AssertionError: Quantize fallback only supports quantization to FP8", + "unit/linear/test_linear.py::TestOptimizedLinear::test[qbit6-bws1]": + "AssertionError: Quantize fallback only supports quantization to FP8", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-False]": + "Xfail due to SW-209651", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-False]": + "Xfail due to SW-209651", +} +g3_eager_xfail_tests = { + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-1-dtype1]": + "Xfail due to SW-196568 This op had not been implemented on HPU backend", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype2]": + "Xfail due to SW-196568 This op had not been implemented on HPU backend", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-1-dtype0]": + "Xfail due to SW-196568 This op had not been implemented on HPU backend", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype1]": + "Xfail due to SW-196568 This op had not been implemented on HPU backend", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype0]": + "Xfail due to SW-196568 This op had not been implemented on HPU backend", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-160-128-2-24-False-True-0.2]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-25-3-True-True-0.05]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-160-128-2-3-True-True-0.1]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-2-3-True-True-0.05]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-1600-128-2-4-False-True-0.2]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-3-1024-512-16-3-False-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-True-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-3-1024-512-16-3-True-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-False-True]": + "xfail due to SW-176905", + 
"unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_1d_tensor": + "xfail due to SW-182766", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_non_divisible": + "xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_nvme_offload": + "xfail due to SW-168596", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_nvme_offload": + "xfail due to SW-168596", + "unit/ops/lion/test_lion.py::TestLionConfigs::test[Lion-False-FusedLion]": + "xfail due to SW-176903", + "unit/ops/lion/test_lion.py::TestLionConfigs::test[Lion-True-DeepSpeedCPULion]": + "xfail due to SW-176903", + "unit/accelerator/test_accelerator.py::test_abstract_methods_defined[deepspeed.accelerator.xpu_accelerator]": + "Xfail due to SW-182749", + "unit/launcher/test_user_args.py::test_user_args[True-I'm going to tell them \"DeepSpeed is the best\"]": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'\"translate English to Romanian: \"']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'I am 72\" tall']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-\"I am 6' tall\"]": + "Xfail due to SW-182753", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[22-fp16]": + "Xfail due to SW-188274", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1024-fp16]": + "Xfail due to SW-188274", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1048576-fp16]": + "Xfail due to SW-188274", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[64-fp16]": + "Xfail due to SW-188274", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[128-fp16]": + "Xfail due to SW-188274", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[16-fp16]": + "Xfail due to SW-188274", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[8-fp16]": + "Xfail due to SW-188274", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-1-dtype1]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-True-1-dtype1]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-False-1-dtype0]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-1-dtype0]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-1-dtype1]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-True-1-dtype1]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-True-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-1-dtype1]": + 
"Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-1-dtype0]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-False-1-dtype1]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-1-dtype0]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-1-dtype0]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-False-1-dtype1]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-False-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-True-1-dtype0]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-1-dtype1]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-1-dtype1]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-True-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-False-1-dtype1]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-1-dtype0]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-False-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-False-1-dtype0]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-1-dtype0]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-1-dtype0]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-1-dtype2]": + "Xfail due to SW-187821", + 
"unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-True-1-dtype0]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-1-dtype1]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-False-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-1-dtype1]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-False-1-dtype0]": + "Xfail due to SW-187821", + "unit/compression/test_dequantization.py::TestDequantization::test_dequantize": + "Xfail due to SW-168442", + "unit/inference/test_human_eval.py::test_human_eval[codellama/CodeLlama-7b-Python-hf]": + "Xfail due to SW-182759", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_stable_diffusion.py::TestStableDiffusion::test": + "Xfail, due to SW-170181.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-160-128-2-24-False-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-True-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-False-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-509-16-3-True-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-4096-128-64-3-True-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True1]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-21-16-3-False-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[3-1024-51-16-3-True-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1536-128-24-3-False-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-53-16-3-False-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-381-16-3-True-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-512-16-3-True-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-120-16-3-True-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-512-16-3-False-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-False-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2048-128-32-3-False-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True0]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[1-256-2048-32-3-True-True]": + "xfail due to SW-176905", + 
"unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-119-16-3-True-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-False-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-False-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1536-128-24-3-False-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2560-128-40-3-False-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[3-1024-54-16-3-True-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-511-16-3-False-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-True-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-24-16-3-False-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-56-16-3-False-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-128-128-2-3-True-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-256-52-4-3-True-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-8192-128-64-3-False-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-128-128-2-3-True-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2560-128-40-3-False-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2048-128-32-3-False-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-2-3-True-True]": + "xfail due to SW-176905", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-True]": + "Xfail, due to SW-163097", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False]": + "Xfail, due to SW-163097", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-True]": + "Xfail, due to SW-163097", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-False]": + "Xfail, due to SW-163097", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-9-1024]": + "Xfail, due to SW-164239", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-9-1024]": + "Xfail, due to SW-164239", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-9-1024]": + "Xfail, due to SW-164239", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom-False]": + "Xfail, due to SW-196522.", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=2]": + "xfail due to SW-185015", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=1]": + "xfail due to SW-185015", + 
"unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-True-False]": + "Graphic compile failed", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-False-False]": + "Graph compile failed", + "unit/utils/test_init_on_device.py::TestOnDevice::test_on_device[hpu]": + "Xfail, due to SW-178730.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG-noTriton-False-True]": + "Xfail due to SW-196571 Assertion error", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-False-True]": + "Xfail due to SW-196571 Assertion error", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-False-True]": + "Xfail due to SW-196571 Assertion error", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-noCG-noTriton-False-True]": + "Xfail due to SW-196571 Assertion error", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom-True]": + "Xfail due to SW-196522", + "unit/inference/test_inference.py::TestInjectionPolicy::test[ws2-fp32-roberta-True]": + "Xfail due to sw-193404", + "unit/inference/test_inference.py::TestInjectionPolicy::test[ws2-fp32-t5-True]": + "Xfail due to sw-193404", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[8bits]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-1]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_half_int4_quantization": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[8bits]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[4bits]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[8bits]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[4bits]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-1]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[4bits]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_half_int8_quantization": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[4bits]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[8bits]": + "Xfail due to SW-182766", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[0-True]": + "Xfail due to SW-199012", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[1-True]": + "Xfail due to SW-199012", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True-True]": + "xfail due to sw-201097", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False-True]": + "xfail due to sw-201097", + 
"unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True-True]": + "xfail due to sw-201097", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False-True]": + "xfail due to sw-201097", + "unit/linear/test_quant_param.py::TestQuantParam::test_hf_clone": + "AssertionError: Quantize fallback only supports quantization to FP8", + "unit/linear/test_linear.py::TestQuantLinear::test[6]": + "AssertionError: Quantize fallback only supports quantization to FP8", + "unit/linear/test_linear.py::TestOptimizedLinear::test[qbit6-bws2]": + "AssertionError: Quantize fallback only supports quantization to FP8", + "unit/linear/test_linear.py::TestOptimizedLinear::test[qbit6-bws1]": + "AssertionError: Quantize fallback only supports quantization to FP8", +} +gpu_xfail_tests = { + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=2]": + "Test requires higher memory.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_post_init_quant_nvme_offload": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_quantized_initialization_nvme_offload": + "Xfailed. failure observed on vanilla as well.", + "unit/ops/quantizer/test_fake_quantization.py::test_fake_quant_dequant[16-tensor_shape0]": + "Xfailed. failure observed on vanilla as well.", + "unit/ops/quantizer/test_fake_quantization.py::test_fake_quant_dequant[1-tensor_shape0]": + "Xfailed. failure observed on vanilla as well.", + "unit/ops/quantizer/test_fake_quantization.py::test_fake_quant_dequant[16-tensor_shape1]": + "Xfailed. failure observed on vanilla as well.", + "unit/ops/quantizer/test_fake_quantization.py::test_fake_quant_dequant[1-tensor_shape1]": + "Xfailed. failure observed on vanilla as well.", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=1]": + "Test requires higher memory.", + "unit/inference/v2/kernels/ragged_ops/test_atom_builder.py::test_single_sequence[seq_params2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_atom_builder.py::test_single_sequence[seq_params0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_atom_builder.py::test_single_sequence[seq_params3]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_atom_builder.py::test_single_sequence[seq_params1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_multiple_prompts[prompt_lengths3]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_multiple_prompts[prompt_lengths1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_continuation[seq_params1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_single_prompt[2037]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_rotary_emb[False]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_gqa[head_config0]": + "Xfailed. 
failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_rotary_emb[True]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_single_prompt[65]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_single_prompt[128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_single_prompt[256]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_head_size[128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_single_prompt[33]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_single_prompt[2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_continuation[seq_params4]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_gqa[head_config2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_head_size[64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_multiple_prompts[prompt_lengths2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_fully_composed": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_gqa[head_config1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_multiple_prompts[prompt_lengths0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_continuation[seq_params0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_continuation[seq_params3]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_continuation[seq_params2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_single_sequence_multiple_blocks[177-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_single_sequence_multiple_blocks[117-88]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_single_sequence_single_block[33-8]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_single_sequence_multiple_blocks[169-8]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_single_sequence_single_block[17-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_single_sequence_multiple_blocks[128-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_multi_sequence": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_single_sequence_single_block[1-0]": + "Xfailed. 
failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_single_sequence_single_block[63-1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_multiple_blocks[False-169-8]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_multi_sequences[True]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_single_block[False-1-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_multiple_blocks[True-169-8]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_single_block[True-1-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_multiple_blocks[False-177-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_multi_sequences[False]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_single_block[True-33-15]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_single_block[True-17-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_single_block[False-33-15]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_multiple_blocks[False-128-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_multiple_blocks[True-117-88]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_single_block[False-17-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_single_block[False-1-63]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_multiple_blocks[True-128-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_multiple_blocks[False-117-88]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_single_block[True-1-63]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_multiple_blocks[True-177-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_supported_dtypes[dtype0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_problem_size_permutations[1024]": + "Xfailed. 
failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_multiple_sequences[seq_lens0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_problem_size_permutations[6144]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_multiple_sequences[seq_lens3]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_supported_dtypes[dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_problem_size_permutations[6784]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_multiple_sequences[seq_lens2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_multiple_sequences[seq_lens1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_gather.py::test_moe_gather[False-278-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_gather.py::test_moe_gather[False-13-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_gather.py::test_moe_gather[False-1977-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_gather.py::test_moe_gather[True-278-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_gather.py::test_moe_gather[True-13-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_gather.py::test_moe_gather[True-1977-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_multi_expert[1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py::test_moe_scatter[True-13-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py::test_moe_scatter[False-13-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py::test_moe_scatter[True-1977-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py::test_moe_scatter[True-278-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py::test_moe_scatter[False-278-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py::test_moe_scatter[False-1977-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_positional_embedding[seq_lens0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_problem_size_permutations[50304-6144]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_dtype_permutations[embed_dtype1-token_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_positional_embedding[seq_lens1]": + "Xfailed. 
failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_complex_sequences[True-seq_lens1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_positional_embedding_offset": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_problem_size_permutations[32000-5120]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_complex_sequences[True-seq_lens0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_problem_size_permutations[1024-1024]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_positional_embedding[seq_lens3]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_dtype_permutations[embed_dtype0-token_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_complex_sequences[False-seq_lens0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_dtype_permutations[embed_dtype0-token_dtype0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_positional_embedding[seq_lens2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_dtype_permutations[embed_dtype1-token_dtype0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_complex_sequences[False-seq_lens1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_single_mapping_gating[433-128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_score_accuracy[32-128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_negative_logits": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_score_accuracy[89-128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_single_mapping_gating[32-128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_single_mapping_gating[89-128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_single_mapping_gating[17-16]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_single_mapping_gating[1-16]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_score_accuracy[433-2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_score_accuracy[17-16]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_determinism": + "Xfailed. 
failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_score_accuracy[1-16]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear_t[problem_shape0-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear_t[problem_shape4-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear_t[problem_shape7-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear[problem_shape5-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear_t[problem_shape1-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear[problem_shape3-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear[problem_shape2-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear[problem_shape4-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear_t[problem_shape3-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear[problem_shape6-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear_t[problem_shape5-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear[problem_shape7-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear_t[problem_shape6-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear[problem_shape1-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear_t[problem_shape2-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear[problem_shape0-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_multiple_prompts[prompt_lengths3]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_single_prompt[256]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_gqa[head_config0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_continuation[seq_params2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_multiple_prompts[prompt_lengths1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_single_prompt[65]": + "Xfailed. 
failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_continuation[seq_params0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_head_size[128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_continuation[seq_params4]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_single_prompt[2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_fully_composed": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_head_size[64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_continuation[seq_params1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_multiple_prompts[prompt_lengths0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_continuation[seq_params3]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_gqa[head_config2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_single_prompt[128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_single_prompt[33]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_single_prompt[2037]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_multiple_prompts[prompt_lengths2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_gqa[head_config1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_cutlass_moe.py::test_expert_variance[64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_cutlass_moe.py::test_in_out_channels[2048-8192]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_cutlass_moe.py::test_expert_variance[32]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_cutlass_moe.py::test_activation_types[ActivationType.RELU]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_cutlass_moe.py::test_dtypes[dtype0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_cutlass_moe.py::test_activation_types[ActivationType.GELU]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_cutlass_moe.py::test_activation_types[ActivationType.SILU]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_cutlass_moe.py::test_successive_inputs": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_cutlass_moe.py::test_in_out_channels[4096-2048]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_cutlass_moe.py::test_in_out_channels[6144-3072]": + "Xfailed. 
failure observed on vanilla as well.", + "unit/inference/v2/modules/test_cutlass_moe.py::test_expert_variance[2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_dtypes[DtypeEnum.bf16]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_act_fns[ActivationType.GELU]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_dtypes[DtypeEnum.fp16]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_single_expert[13-2048-2048]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_act_fns[ActivationType.SILU]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_multi_expert[64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_single_expert[256-1024-4096]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_multi_expert[4]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_single_expert[893-5120-2560]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_multi_expert[16]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_multi_expert[128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_act_fns[ActivationType.RELU]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_single_expert[278-5120-2048]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_multi_expert[1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_nvme_offload": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_nvme_offload": + "Xfailed. 
failure observed on vanilla as well.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-8192-128-64-3-False-True]": + "Test requires higher memory.", + "unit/ops/adam/test_adamw/TestAdamConfigs/test[AdamW-True-False-True-resulting_optimizer6]": + "Xfail, due to SW-176845", + "unit/ops/adam/test_adamw/TestAdamConfigs/test[AdamW-True-False-False-resulting_optimizer2]": + "Xfail, due to SW-176845", + "unit/ops/adam/test_adamw/TestAdamConfigs/test[Adam-True-False-True-resulting_optimizer14]": + "Xfail, due to SW-176845", + "unit/ops/adam/test_adamw/TestAdamConfigs/test[Adam-True-False-False-resulting_optimizer10]": + "Xfail, due to SW-176845", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-neo]": + "Xfail due to SW-177890 and SW-177889", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[facebook/opt-1.3b-bsz=1]": + "Xfail due to SW-177889", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[EleutherAI/gpt-neo-1.3B-bsz=2]": + "Xfail due to SW-177889", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[facebook/opt-1.3b-bsz=2]": + "Xfail due to SW-177889", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[EleutherAI/gpt-neo-1.3B-bsz=1]": + "Xfail due to SW-177889", + "unit/inference/test_inference.py::TestMPSize::test[fp16-gpt-neo]": + "Xfail due to SW-177889", + "unit/inference/test_inference.py::TestLowCpuMemUsage::test[gpt2-False]": + "Xfail due to SW-177889", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-False]": + "Xfail due to SW-177889", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-True]": + "Xfail due to SW-177889", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False]": + "Xfail due to SW-177889", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-True]": + "Xfail due to SW-177889", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-2-dtype2]": + "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype2]": + "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-1-dtype1]": + "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-2-dtype1]": + "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype1]": + "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-1-dtype2]": + "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-2-dtype1]": + "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-1-dtype1]": + "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-2-dtype2]": + "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported", + 
"unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-1-dtype2]": + "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile": + "Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_set_compile_kwargs": + "Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_set_compiler_fn": + "Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile_kwargs": + "Nvidia Titan XP GPU not supported", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[facebook/opt-1.3b-bsz=2]": + "Xfail due to SW-177889", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[EleutherAI/gpt-neo-1.3B-bsz=1]": + "Xfail due to SW-177889", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[facebook/opt-1.3b-bsz=1]": + "Xfail due to SW-177889", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[EleutherAI/gpt-neo-1.3B-bsz=2]": + "Xfail due to SW-177889", + "unit/inference/test_inference.py::TestLowCpuMemUsage::test[gpt2-False]": + "Xfail due to SW-177889", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-True]": + "Xfail due to SW-177889", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-True]": + "Xfail due to SW-177889", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False]": + "Xfail due to SW-177889", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-False]": + "Xfail due to SW-177889", + "unit/inference/v2/ragged/test_manager_configs.py::test_too_small_max_ragged_batch_size": + "Xfail due to ValidationError if the input data cannot be parsed to form a valid model", + "unit/inference/v2/ragged/test_manager_configs.py::test_zero_max_tracked_sequences": + "Xfail due to ValidationError if the input data cannot be parsed to form a valid model", + "unit/inference/v2/ragged/test_manager_configs.py::test_zero_max_ragged_batch_size": + "Xfail due to ValidationError if the input data cannot be parsed to form a valid model", + "unit/inference/v2/ragged/test_manager_configs.py::test_negative_max_ragged_batch_size": + "Xfail due to ValidationError if the input data cannot be parsed to form a valid model", + "unit/inference/v2/ragged/test_manager_configs.py::test_too_small_max_tracked_sequences": + "Xfail due to ValidationError if the input data cannot be parsed to form a valid model", + "unit/inference/v2/ragged/test_manager_configs.py::test_negative_max_tracked_sequences": + "Xfail due to ValidationError if the input data cannot be parsed to form a valid model", + "unit/inference/v2/ragged/test_manager_configs.py::test_zero_max_ragged_sequence_count": + "Xfail due to ValidationError if the input data cannot be parsed to form a valid model", + "unit/inference/v2/ragged/test_manager_configs.py::test_negative_max_ragged_sequence_count": + "Xfail due to ValidationError if the input data cannot be parsed to form a valid model", + "unit/runtime/test_ds_initialize.py::TestNoOptim::test[0]": + "Xfail due to OOM", + "unit/runtime/test_ds_initialize.py::TestNoOptim::test[3]": + "Xfail due to OOM", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[Callable]": + "Xfail due to OOM", + 
"unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[Optimizer]": + "Xfail due to OOM", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[None]": + "Xfail due to OOM", + "unit/runtime/test_ds_initialize.py::TestConfigOptimizer::test[False]": + "Xfail due to OOM", + "unit/runtime/test_ds_initialize.py::TestConfigOptimizer::test[True]": + "Xfail due to OOM", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest[True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[1-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-True-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[1-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[0-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[0-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-True-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_unfused_optimizer[True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fused_optimizer[True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fp32_optimizer[True]": + "Compile tests not supported on Titan-XP", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[0-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[0-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[2-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[1-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[2-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[3-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[1-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-True-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-True-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-True-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[2-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[2-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[3-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[1-True]": + "Compile tests not supported on 
Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[1-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[1-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[2-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-False-Adam-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-True-deepspeed_adam-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-True-deepspeed_adam-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[1-False-Adam-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[2-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-False-Adam-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[1-False-Adam-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-False-Adam-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[1-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[3-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[1-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[2-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-False-Adam-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[0-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_pipeline_checkpoint_loading[3-True]": + "Compile tests not supported on Titan-XP", + "unit/inference/test_human_eval.py::test_human_eval[codellama/CodeLlama-7b-Python-hf]": + "Xfail due to SW-182759", + "unit/accelerator/test_accelerator.py::test_abstract_methods_defined[deepspeed.accelerator.xpu_accelerator]": + "Xfail due to SW-182749", + "unit/launcher/test_user_args.py::test_user_args[True-I'm going to tell them \"DeepSpeed is the best\"]": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'\"translate 
English to Romanian: \"']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'I am 72\" tall']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-\"I am 6' tall\"]": + "Xfail due to SW-182753", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-None]": + "Cuda OOM", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-None]": + "Cuda OOM", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-_LRScheduler]": + "Cuda OOM", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-_LRScheduler]": + "Cuda OOM", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Callable-Callable]": + "Cuda OOM", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Callable-None]": + "Cuda OOM", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-Callable]": + "Cuda OOM", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-Callable]": + "Cuda OOM", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Callable-_LRScheduler]": + "Cuda OOM", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-True-False]": + "Same failure in Vanilla.", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-False-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-False-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-False-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-False-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-False-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-False-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-False-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-False-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-False-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-False-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-False-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Same failure in Vanilla.", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-False-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-False-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-False-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-False-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-False-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/utils/test_init_on_device.py::TestOnDevice::test_on_device[hpu]": + "Xfail, due to SW-178730.", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom-False]": + "Xfail due to SW-196379", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-196379", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[bf16-marian-True-False]": + "Xfail due to SW-203720", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[fp16-marian-True-False]": + "Xfail due to SW-203720", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[fp16-marian-True-False]": + "Xfail due to SW-203720", +}